(itemsListmodel);
+ itemsList.setSelectionMode(ListSelectionModel.SINGLE_INTERVAL_SELECTION);
+ itemsList.setLayoutOrientation(JList.VERTICAL);
+ itemsList.setVisibleRowCount(-1);
+ JScrollPane itemListscrollPane = new JScrollPane(itemsList);
+ itemListscrollPane.setBounds(34, 49, 155, 254);
+ panel_1.add(itemListscrollPane);
+
+ JButton btnEdit = new JButton("Edit");
+ final Application app = this;
+ btnEdit.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ ItemDefinitions defs = itemsList.getSelectedValue();
+ if(defs == null)
+ return;
+ new ItemDefsEditor(app, defs);
+ }
+ });
+ btnEdit.setBounds(201, 48, 90, 28);
+ panel_1.add(btnEdit);
+
+ JButton btnAdd = new JButton("Add");
+ btnAdd.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+
+ new ItemDefsEditor(app, new ItemDefinitions(STORE, Utils.getItemDefinitionsSize(STORE) , false));
+ }
+ });
+ btnAdd.setBounds(201, 88, 90, 28);
+ panel_1.add(btnAdd);
+
+ JButton btnRemove = new JButton("Remove");
+ btnRemove.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ ItemDefinitions defs = itemsList.getSelectedValue();
+ if(defs == null)
+ return;
+ STORE.getIndexes()[Constants.ITEM_DEFINITIONS_INDEX].removeFile(defs.getArchiveId(), defs.getFileId());
+ removeItemDefs(defs);
+ }
+ });
+ btnRemove.setBounds(201, 128, 90, 28);
+ panel_1.add(btnRemove);
+
+ JLabel label = new JLabel("Cached Items:");
+ label.setFont(new Font("Comic Sans MS", Font.PLAIN, 18));
+ label.setBounds(34, 18, 155, 21);
+ panel_1.add(label);
+
+ JButton btnDuplicate = new JButton("Clone");
+ btnDuplicate.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ ItemDefinitions defs = itemsList.getSelectedValue();
+ if(defs == null)
+ return;
+ defs = (ItemDefinitions) defs.clone();
+ if(defs == null)
+ return;
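+ // give the clone the next free item id so it does not overwrite the source definition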
+ defs.id = Utils.getItemDefinitionsSize(STORE);
+ new ItemDefsEditor(app, defs);
+ }
+ });
+ btnDuplicate.setBounds(201, 168, 90, 28);
+ panel_1.add(btnDuplicate);
+ addAllItems();
+ }
+
+ public void addAllItems() {
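+ // NOTE: the upper bound is reduced by 22314, apparently to skip trailing definitions in this particular cache; adjust if your cache differs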
+ for(int id = 0; id < Utils.getItemDefinitionsSize(STORE) - 22314; id++) {
+ addItemDefs(ItemDefinitions.getItemDefinition(STORE, id));
+ }
+ }
+
+ public void addItemDefs(final ItemDefinitions defs) {
+ EventQueue.invokeLater(new Runnable() {
+ public void run() {
+ itemsListmodel.addElement(defs);
+ }
+ });
+ }
+
+ public void updateItemDefs(final ItemDefinitions defs) {
+ EventQueue.invokeLater(new Runnable() {
+ public void run() {
+ int index = itemsListmodel.indexOf(defs);
+ if(index == -1)
+ itemsListmodel.addElement(defs);
+ else
+ itemsListmodel.setElementAt(defs, index);
+ }
+ });
+ }
+
+ public void removeItemDefs(final ItemDefinitions defs) {
+ EventQueue.invokeLater(new Runnable() {
+ public void run() {
+ itemsListmodel.removeElement(defs);
+ }
+ });
+ }
+
+ public JFrame getFrame() {
+ return frmCacheEditorV;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/com/alex/tools/itemsDefsEditor/GeneratedUkeys.java b/Tools/Cache Editor/src/com/alex/tools/itemsDefsEditor/GeneratedUkeys.java
new file mode 100644
index 000000000..da61674c8
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/tools/itemsDefsEditor/GeneratedUkeys.java
@@ -0,0 +1,48 @@
+package com.alex.tools.itemsDefsEditor;
+
+import java.awt.event.ActionEvent;
+import java.awt.event.ActionListener;
+import java.util.Arrays;
+
+import javax.swing.JButton;
+import javax.swing.JDialog;
+import javax.swing.JEditorPane;
+import javax.swing.JFrame;
+
+@SuppressWarnings("serial")
+public class GeneratedUkeys extends JDialog {
+
+ public GeneratedUkeys(JFrame frame, byte[] ukeys) {
+ super(frame, "Ukeys", true);
+ setBounds(100, 100, 450, 300);
+ getContentPane().setLayout(null);
+
+ final JEditorPane editorPane = new JEditorPane();
+ editorPane.setText(Arrays.toString(ukeys));
+ editorPane.setBounds(6, 6, 420, 213);
+ getContentPane().add(editorPane);
+
+ JButton btnClose = new JButton("Close");
+ btnClose.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ dispose();
+ }
+ });
+ btnClose.setBounds(101, 221, 90, 28);
+ getContentPane().add(btnClose);
+
+ JButton btnCopy = new JButton("Copy");
+ btnCopy.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
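+ // select the whole text and fire the editor's built-in "copy" action from its ActionMap to push it onto the clipboard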
+ ActionEvent nev = new ActionEvent(editorPane, ActionEvent.ACTION_PERFORMED, "copy");
+ editorPane.selectAll();
+ editorPane.getActionMap().get(nev.getActionCommand()).actionPerformed(nev);
+ }
+ });
+ btnCopy.setBounds(6, 221, 90, 28);
+ getContentPane().add(btnCopy);
+ setDefaultCloseOperation(JDialog.DISPOSE_ON_CLOSE);
+ setVisible(true);
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/tools/itemsDefsEditor/ItemDefsEditor.java b/Tools/Cache Editor/src/com/alex/tools/itemsDefsEditor/ItemDefsEditor.java
new file mode 100644
index 000000000..9004820a4
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/tools/itemsDefsEditor/ItemDefsEditor.java
@@ -0,0 +1,404 @@
+package com.alex.tools.itemsDefsEditor;
+
+import java.awt.BorderLayout;
+import java.awt.FlowLayout;
+import java.awt.Font;
+import java.awt.event.ActionEvent;
+import java.awt.event.ActionListener;
+
+import javax.swing.JButton;
+import javax.swing.JCheckBox;
+import javax.swing.JDialog;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JTextField;
+import javax.swing.border.EmptyBorder;
+
+import com.alex.loaders.items.ItemDefinitions;
+
+@SuppressWarnings("serial")
+public class ItemDefsEditor extends JDialog {
+
+ private final JPanel contentPanel = new JPanel();
+ private ItemDefinitions defs;
+ private Application application;
+ private JTextField modelIDField;
+ private JTextField nameField;
+ private JTextField modelZoomField;
+ private JTextField groundOptionsField;
+ private JTextField inventoryOptionsField;
+ private JTextField femaleModelId2Field;
+ private JTextField maleModelId1Field;
+ private JTextField maleModelId2Field;
+ private JTextField maleModelId3Field;
+ private JTextField femaleModelId1Field;
+ private JTextField femaleModelId3Field;
+ private JTextField teamIdField;
+ private JTextField notedItemIdField;
+ private JTextField switchNotedItemField;
+ private JTextField lendedItemIdField;
+ private JTextField switchLendedItemField;
+ private JTextField changedModelColorsField;
+ private JTextField changedTextureColorsField;
+ private JCheckBox membersOnlyCheck;
+
+ public void save() {
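+ // note: Integer.valueOf throws NumberFormatException on non-numeric input; the fields are assumed to hold valid numbers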
+
+ //inv
+ defs.setInvModelId(Integer.valueOf(modelIDField.getText()));
+ defs.setName(nameField.getText());
+ defs.setInvModelZoom(Integer.valueOf(modelZoomField.getText()));
+ String[] groundOptions = groundOptionsField.getText().split(";");
+ for(int i = 0; i < defs.getGroundOptions().length; i++)
+ defs.getGroundOptions()[i] = groundOptions[i].equals("null") ? null : groundOptions[i];
+ String[] invOptions = inventoryOptionsField.getText().split(";");
+ for(int i = 0; i < defs.getInventoryOptions().length; i++)
+ defs.getInventoryOptions()[i] = invOptions[i].equals("null") ? null : invOptions[i];
+
+ //wearing
+
+ defs.maleEquipModelId1 = Integer.valueOf(maleModelId1Field.getText());
+ defs.maleEquipModelId2 = Integer.valueOf(maleModelId2Field.getText());
+ defs.maleEquipModelId3 = Integer.valueOf(maleModelId3Field.getText());
+
+ defs.femaleEquipModelId1 = Integer.valueOf(femaleModelId1Field.getText());
+ defs.femaleEquipModelId2 = Integer.valueOf(femaleModelId2Field.getText());
+ defs.femaleEquipModelId3 = Integer.valueOf(femaleModelId3Field.getText());
+ defs.teamId = Integer.valueOf(teamIdField.getText());
+
+ //others
+ defs.notedItemId = Integer.valueOf(notedItemIdField.getText());
+ defs.switchNoteItemId = Integer.valueOf(switchNotedItemField.getText());
+ defs.lendedItemId = Integer.valueOf(lendedItemIdField.getText());
+ defs.switchLendItemId = Integer.valueOf(switchLendedItemField.getText());
+ defs.resetModelColors();
+ if(!changedModelColorsField.getText().equals("")) {
+ String[] splitModelColorsTexts = changedModelColorsField.getText().split(";");
+ for(String t : splitModelColorsTexts) {
+ String[] editedColor = t.split("=");
+ defs.changeModelColor(Integer.valueOf(editedColor[0]), Integer.valueOf(editedColor[1]));
+ }
+ }
+ defs.resetTextureColors();
+ if(!changedTextureColorsField.getText().equals("")) {
+ String[] splitTextureColorsTexts = changedTextureColorsField.getText().split(";");
+ for(String t : splitTextureColorsTexts) {
+ String[] editedColor = t.split("=");
+ defs.changeTextureColor(Integer.valueOf(editedColor[0]), Integer.valueOf(editedColor[1]));
+ }
+ }
+ defs.membersOnly = membersOnlyCheck.isSelected();
+ defs.write(Application.STORE);
+ application.updateItemDefs(defs);
+ }
+
+ /**
+ * Create the dialog.
+ */
+ public ItemDefsEditor(Application application, ItemDefinitions defs) {
+ super(application.getFrame(), "Item Definitions Editor", true);
+ this.defs = defs;
+ this.application = application;
+ setBounds(100, 100, 912, 354);
+ getContentPane().setLayout(new BorderLayout());
+ contentPanel.setBorder(new EmptyBorder(5, 5, 5, 5));
+ getContentPane().add(contentPanel, BorderLayout.CENTER);
+ contentPanel.setLayout(null);
+
+ JLabel lblNewLabel = new JLabel("Model ID:");
+ lblNewLabel.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ lblNewLabel.setBounds(6, 43, 81, 21);
+ contentPanel.add(lblNewLabel);
+ {
+ modelIDField = new JTextField();
+ modelIDField.setBounds(139, 40, 122, 28);
+ contentPanel.add(modelIDField);
+ modelIDField.setColumns(10);
+ modelIDField.setText(""+defs.getInvModelId());
+ }
+ {
+ JLabel label = new JLabel("Name:");
+ label.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label.setBounds(6, 76, 81, 21);
+ contentPanel.add(label);
+ }
+ {
+ nameField = new JTextField();
+ nameField.setBounds(139, 73, 122, 28);
+ contentPanel.add(nameField);
+ nameField.setColumns(10);
+ nameField.setText(defs.getName());
+ }
+ {
+ JLabel label = new JLabel("Model Zoom:");
+ label.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label.setBounds(6, 109, 95, 21);
+ contentPanel.add(label);
+ }
+ {
+ modelZoomField = new JTextField();
+ modelZoomField.setBounds(139, 106, 122, 28);
+ contentPanel.add(modelZoomField);
+ modelZoomField.setColumns(10);
+ modelZoomField.setText(""+defs.getInvModelZoom());
+ }
+ {
+ JLabel label = new JLabel("Ground Options:");
+ label.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label.setBounds(6, 142, 108, 21);
+ contentPanel.add(label);
+ }
+ {
+ groundOptionsField = new JTextField();
+ groundOptionsField.setBounds(139, 139, 122, 28);
+ contentPanel.add(groundOptionsField);
+ groundOptionsField.setColumns(10);
+ String text = "";
+ for(String option : defs.getGroundOptions())
+ text += (option == null ? "null" : option)+";";
+ groundOptionsField.setText(text);
+ }
+ {
+ JLabel label = new JLabel("Inventory Options:");
+ label.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label.setBounds(6, 175, 139, 21);
+ contentPanel.add(label);
+ }
+ {
+ inventoryOptionsField = new JTextField();
+ inventoryOptionsField.setBounds(139, 172, 122, 28);
+ contentPanel.add(inventoryOptionsField);
+ inventoryOptionsField.setColumns(10);
+ String text = "";
+ for(String option : defs.getInventoryOptions())
+ text += (option == null ? "null" : option)+";";
+ inventoryOptionsField.setText(text);
+ }
+ {
+ JButton saveButton = new JButton("Save");
+ saveButton.setBounds(6, 265, 55, 28);
+ contentPanel.add(saveButton);
+ saveButton.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ save();
+ dispose();
+ }
+ });
+ getRootPane().setDefaultButton(saveButton);
+ }
+ {
+ JButton cancelButton = new JButton("Cancel");
+ cancelButton.setBounds(73, 265, 67, 28);
+ contentPanel.add(cancelButton);
+ cancelButton.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ dispose();
+ }
+ });
+ cancelButton.setActionCommand("Cancel");
+ }
+
+ JLabel label = new JLabel("Interface / Droped");
+ label.setFont(new Font("Comic Sans MS", Font.PLAIN, 18));
+ label.setBounds(6, 6, 205, 21);
+ contentPanel.add(label);
+
+ JLabel label_1 = new JLabel("Wearing");
+ label_1.setFont(new Font("Comic Sans MS", Font.PLAIN, 18));
+ label_1.setBounds(273, 6, 205, 21);
+ contentPanel.add(label_1);
+
+ JLabel label_2 = new JLabel("Male Model ID 1:");
+ label_2.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_2.setBounds(273, 43, 131, 21);
+ contentPanel.add(label_2);
+
+ JLabel label_3 = new JLabel("Male Model ID 2:");
+ label_3.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_3.setBounds(273, 76, 131, 21);
+ contentPanel.add(label_3);
+
+ JLabel label_4 = new JLabel("Male Model ID 3:");
+ label_4.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_4.setBounds(273, 112, 131, 21);
+ contentPanel.add(label_4);
+
+ JLabel label_5 = new JLabel("Female Model ID 1:");
+ label_5.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_5.setBounds(273, 145, 131, 21);
+ contentPanel.add(label_5);
+
+ JLabel label_6 = new JLabel("Female Model ID 2:");
+ label_6.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_6.setBounds(273, 175, 131, 21);
+ contentPanel.add(label_6);
+
+ JLabel label_7 = new JLabel("Female Model ID 3:");
+ label_7.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_7.setBounds(273, 208, 131, 21);
+ contentPanel.add(label_7);
+
+ femaleModelId2Field = new JTextField();
+ femaleModelId2Field.setBounds(411, 172, 122, 28);
+ contentPanel.add(femaleModelId2Field);
+ femaleModelId2Field.setColumns(10);
+ femaleModelId2Field.setText(""+defs.femaleEquipModelId2);
+
+ maleModelId1Field = new JTextField();
+ maleModelId1Field.setBounds(411, 40, 122, 28);
+ contentPanel.add(maleModelId1Field);
+ maleModelId1Field.setColumns(10);
+ maleModelId1Field.setText(""+defs.maleEquipModelId1);
+ {
+ maleModelId2Field = new JTextField();
+ maleModelId2Field.setBounds(411, 73, 122, 28);
+ contentPanel.add(maleModelId2Field);
+ maleModelId2Field.setColumns(10);
+ maleModelId2Field.setText(""+defs.maleEquipModelId2);
+ }
+ {
+ maleModelId3Field = new JTextField();
+ maleModelId3Field.setBounds(411, 106, 122, 28);
+ contentPanel.add(maleModelId3Field);
+ maleModelId3Field.setColumns(10);
+ maleModelId3Field.setText(""+defs.maleEquipModelId3);
+ }
+ {
+ femaleModelId1Field = new JTextField();
+ femaleModelId1Field.setBounds(411, 139, 122, 28);
+ contentPanel.add(femaleModelId1Field);
+ femaleModelId1Field.setColumns(10);
+ femaleModelId1Field.setText(""+defs.femaleEquipModelId1);
+ }
+ {
+ femaleModelId3Field = new JTextField();
+ femaleModelId3Field.setBounds(411, 205, 122, 28);
+ contentPanel.add(femaleModelId3Field);
+ femaleModelId3Field.setColumns(10);
+ femaleModelId3Field.setText(""+defs.femaleEquipModelId3);
+ }
+ {
+ JLabel label_8 = new JLabel("Team ID:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(273, 241, 131, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ teamIdField = new JTextField();
+ teamIdField.setBounds(411, 238, 122, 28);
+ contentPanel.add(teamIdField);
+ teamIdField.setColumns(10);
+ teamIdField.setText(""+defs.teamId);
+ }
+ {
+ JLabel label_8 = new JLabel("Others");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 18));
+ label_8.setBounds(539, 6, 205, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ JLabel label_8 = new JLabel("Noted Item ID:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(545, 43, 131, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ JLabel label_8 = new JLabel("Switch Noted Item Id:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(545, 76, 160, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ JLabel label_8 = new JLabel("Lended Item ID:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(545, 109, 160, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ JLabel label_8 = new JLabel("Switch Lended Item Id:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(545, 145, 160, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ notedItemIdField = new JTextField();
+ notedItemIdField.setBounds(707, 39, 122, 28);
+ contentPanel.add(notedItemIdField);
+ notedItemIdField.setColumns(10);
+ notedItemIdField.setText(""+defs.notedItemId);
+ }
+ {
+ switchNotedItemField = new JTextField();
+ switchNotedItemField.setBounds(707, 73, 122, 28);
+ contentPanel.add(switchNotedItemField);
+ switchNotedItemField.setColumns(10);
+ switchNotedItemField.setText(""+defs.switchNoteItemId);
+ }
+ {
+ lendedItemIdField = new JTextField();
+ lendedItemIdField.setBounds(707, 106, 122, 28);
+ contentPanel.add(lendedItemIdField);
+ lendedItemIdField.setColumns(10);
+ lendedItemIdField.setText(""+defs.lendedItemId);
+ }
+ {
+ switchLendedItemField = new JTextField();
+ switchLendedItemField.setBounds(707, 139, 122, 28);
+ contentPanel.add(switchLendedItemField);
+ switchLendedItemField.setColumns(10);
+ switchLendedItemField.setText(""+defs.switchLendItemId);
+ }
+ {
+ JLabel label_8 = new JLabel("Changed Model Colors:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(545, 175, 160, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ changedModelColorsField = new JTextField();
+ changedModelColorsField.setBounds(707, 172, 122, 28);
+ contentPanel.add(changedModelColorsField);
+ changedModelColorsField.setColumns(10);
+ String text = "";
+ if(defs.originalModelColors != null) {
+ for(int i = 0; i < defs.originalModelColors.length; i++) {
+ text += defs.originalModelColors[i]+"="+defs.modifiedModelColors[i]+";";
+ }
+ }
+ changedModelColorsField.setText(text);
+ }
+ {
+ JLabel label_8 = new JLabel("Changed Texture Colors:");
+ label_8.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ label_8.setBounds(545, 205, 160, 21);
+ contentPanel.add(label_8);
+ }
+ {
+ changedTextureColorsField = new JTextField();
+ changedTextureColorsField.setBounds(707, 205, 122, 28);
+ contentPanel.add(changedTextureColorsField);
+ changedTextureColorsField.setColumns(10);
+ String text = "";
+ if(defs.originalTextureColors != null) {
+ for(int i = 0; i < defs.originalTextureColors.length; i++) {
+ text += defs.originalTextureColors[i]+"="+defs.modifiedTextureColors[i]+";";
+ }
+ }
+ changedTextureColorsField.setText(text);
+ }
+
+ membersOnlyCheck = new JCheckBox("Members Only");
+ membersOnlyCheck.setFont(new Font("Comic Sans MS", Font.PLAIN, 14));
+ membersOnlyCheck.setBounds(545, 243, 131, 18);
+ membersOnlyCheck.setSelected(defs.membersOnly);
+ contentPanel.add(membersOnlyCheck);
+ {
+ JPanel buttonPane = new JPanel();
+ buttonPane.setLayout(new FlowLayout(FlowLayout.RIGHT));
+ getContentPane().add(buttonPane, BorderLayout.SOUTH);
+ }
+
+ setDefaultCloseOperation(JDialog.DISPOSE_ON_CLOSE);
+ setVisible(true);
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2BlockEntry.java b/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2BlockEntry.java
new file mode 100644
index 000000000..4f941a098
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2BlockEntry.java
@@ -0,0 +1,56 @@
+package com.alex.util.bzip2;
+
+public class BZip2BlockEntry {
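+ // per-block bzip2 decompression state; the numeric field names are left over from deobfuscation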
+
+ boolean aBooleanArray2205[];
+ boolean aBooleanArray2213[];
+ byte aByte2201;
+ byte aByteArray2204[];
+ byte aByteArray2211[];
+ byte aByteArray2212[];
+ byte aByteArray2214[];
+ byte aByteArray2219[];
+ byte aByteArray2224[];
+ byte aByteArrayArray2229[][];
+ int anInt2202;
+ int anInt2203;
+ int anInt2206;
+ int anInt2207;
+ int anInt2208;
+ int anInt2209;
+ int anInt2215;
+ int anInt2216;
+ int anInt2217;
+ int anInt2221;
+ int anInt2222;
+ int anInt2223;
+ int anInt2225;
+ int anInt2227;
+ int anInt2232;
+ int anIntArray2200[];
+ int anIntArray2220[];
+ int anIntArray2226[];
+ int anIntArray2228[];
+ int anIntArrayArray2210[][];
+ int anIntArrayArray2218[][];
+ int anIntArrayArray2230[][];
+
+ public BZip2BlockEntry() {
+ anIntArray2200 = new int[6];
+ anInt2203 = 0;
+ aByteArray2204 = new byte[4096];
+ aByteArray2211 = new byte[256];
+ aByteArray2214 = new byte[18002];
+ aByteArray2219 = new byte[18002];
+ anIntArray2220 = new int[257];
+ anIntArrayArray2218 = new int[6][258];
+ aBooleanArray2205 = new boolean[16];
+ aBooleanArray2213 = new boolean[256];
+ anInt2209 = 0;
+ anIntArray2226 = new int[16];
+ anIntArrayArray2210 = new int[6][258];
+ aByteArrayArray2229 = new byte[6][258];
+ anIntArrayArray2230 = new int[6][258];
+ anIntArray2228 = new int[256];
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2Compressor.java b/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2Compressor.java
new file mode 100644
index 000000000..2d51b995d
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2Compressor.java
@@ -0,0 +1,22 @@
+package com.alex.util.bzip2;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+import org.apache.tools.bzip2.CBZip2OutputStream;
+
+public class BZip2Compressor {
+
+ public static final byte[] compress(byte[] data) {
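+ // note: Ant's CBZip2OutputStream does not write the leading two-byte "BZ" magic, so callers must account for the missing header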
+ ByteArrayOutputStream compressedBytes = new ByteArrayOutputStream();
+ try {
+ CBZip2OutputStream out = new CBZip2OutputStream(compressedBytes);
+ out.write(data);
+ out.close();
+ return compressedBytes.toByteArray();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2Decompressor.java b/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2Decompressor.java
new file mode 100644
index 000000000..8ca0affd0
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/bzip2/BZip2Decompressor.java
@@ -0,0 +1,546 @@
+package com.alex.util.bzip2;
+
+
+
+public class BZip2Decompressor {
+
+ private static int anIntArray257[];
+ private static BZip2BlockEntry entryInstance = new BZip2BlockEntry();
+
+ public static final void decompress(byte decompressedData[], byte packedData[], int containerSize, int blockSize) {
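+ // note: containerSize is not used here; blockSize serves as the initial read offset into packedData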
+ synchronized (entryInstance) {
+ entryInstance.aByteArray2224 = packedData;
+ entryInstance.anInt2209 = blockSize;
+ entryInstance.aByteArray2212 = decompressedData;
+ entryInstance.anInt2203 = 0;
+ entryInstance.anInt2206 = decompressedData.length;
+ entryInstance.anInt2232 = 0;
+ entryInstance.anInt2207 = 0;
+ entryInstance.anInt2217 = 0;
+ entryInstance.anInt2216 = 0;
+ method1793(entryInstance);
+ entryInstance.aByteArray2224 = null;
+ entryInstance.aByteArray2212 = null;
+ }
+ }
+
+ private static final void method1785(BZip2BlockEntry entry) {
+ entry.anInt2215 = 0;
+ for (int i = 0; i < 256; i++) {
+ if (entry.aBooleanArray2213[i]) {
+ entry.aByteArray2211[entry.anInt2215] = (byte) i;
+ entry.anInt2215++;
+ }
+ }
+
+ }
+
+ private static final void method1786(int ai[], int ai1[], int ai2[],
+ byte abyte0[], int i, int j, int k) {
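+ // builds the Huffman decoding tables (limits, bases, symbol permutation) from the per-symbol code lengths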
+ int l = 0;
+ for (int i1 = i; i1 <= j; i1++) {
+ for (int l2 = 0; l2 < k; l2++) {
+ if (abyte0[l2] == i1) {
+ ai2[l] = l2;
+ l++;
+ }
+ }
+
+ }
+
+ for (int j1 = 0; j1 < 23; j1++) {
+ ai1[j1] = 0;
+ }
+
+ for (int k1 = 0; k1 < k; k1++) {
+ ai1[abyte0[k1] + 1]++;
+ }
+
+ for (int l1 = 1; l1 < 23; l1++) {
+ ai1[l1] += ai1[l1 - 1];
+ }
+
+ for (int i2 = 0; i2 < 23; i2++) {
+ ai[i2] = 0;
+ }
+
+ int i3 = 0;
+ for (int j2 = i; j2 <= j; j2++) {
+ i3 += ai1[j2 + 1] - ai1[j2];
+ ai[j2] = i3 - 1;
+ i3 <<= 1;
+ }
+
+ for (int k2 = i + 1; k2 <= j; k2++) {
+ ai1[k2] = (ai[k2 - 1] + 1 << 1) - ai1[k2];
+ }
+
+ }
+
+ private static final void method1787(BZip2BlockEntry entry) {
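+ // walks the inverse-BWT chain and expands the run-length encoding into the output buffer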
+ byte byte4 = entry.aByte2201;
+ int i = entry.anInt2222;
+ int j = entry.anInt2227;
+ int k = entry.anInt2221;
+ int ai[] = anIntArray257;
+ int l = entry.anInt2208;
+ byte abyte0[] = entry.aByteArray2212;
+ int i1 = entry.anInt2203;
+ int j1 = entry.anInt2206;
+ int k1 = j1;
+ int l1 = entry.anInt2225 + 1;
+ label0: do {
+ if (i > 0) {
+ do {
+ if (j1 == 0) {
+ break label0;
+ }
+ if (i == 1) {
+ break;
+ }
+ abyte0[i1] = byte4;
+ i--;
+ i1++;
+ j1--;
+ } while (true);
+ if (j1 == 0) {
+ i = 1;
+ break;
+ }
+ abyte0[i1] = byte4;
+ i1++;
+ j1--;
+ }
+ boolean flag = true;
+ while (flag) {
+ flag = false;
+ if (j == l1) {
+ i = 0;
+ break label0;
+ }
+ byte4 = (byte) k;
+ l = ai[l];
+ byte byte0 = (byte) (l & 0xff);
+ l >>= 8;
+ j++;
+ if (byte0 != k) {
+ k = byte0;
+ if (j1 == 0) {
+ i = 1;
+ } else {
+ abyte0[i1] = byte4;
+ i1++;
+ j1--;
+ flag = true;
+ continue;
+ }
+ break label0;
+ }
+ if (j != l1) {
+ continue;
+ }
+ if (j1 == 0) {
+ i = 1;
+ break label0;
+ }
+ abyte0[i1] = byte4;
+ i1++;
+ j1--;
+ flag = true;
+ }
+ i = 2;
+ l = ai[l];
+ byte byte1 = (byte) (l & 0xff);
+ l >>= 8;
+ if (++j != l1) {
+ if (byte1 != k) {
+ k = byte1;
+ } else {
+ i = 3;
+ l = ai[l];
+ byte byte2 = (byte) (l & 0xff);
+ l >>= 8;
+ if (++j != l1) {
+ if (byte2 != k) {
+ k = byte2;
+ } else {
+ l = ai[l];
+ byte byte3 = (byte) (l & 0xff);
+ l >>= 8;
+ j++;
+ i = (byte3 & 0xff) + 4;
+ l = ai[l];
+ k = (byte) (l & 0xff);
+ l >>= 8;
+ j++;
+ }
+ }
+ }
+ }
+ } while (true);
+ entry.anInt2216 += k1 - j1;
+ entry.aByte2201 = byte4;
+ entry.anInt2222 = i;
+ entry.anInt2227 = j;
+ entry.anInt2221 = k;
+ anIntArray257 = ai;
+ entry.anInt2208 = l;
+ entry.aByteArray2212 = abyte0;
+ entry.anInt2203 = i1;
+ entry.anInt2206 = j1;
+ }
+
+ private static final byte method1788(BZip2BlockEntry entry) {
+ return (byte) method1790(1, entry);
+ }
+
+ private static final byte method1789(BZip2BlockEntry entry) {
+ return (byte) method1790(8, entry);
+ }
+
+ private static final int method1790(int i, BZip2BlockEntry entry) {
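+ // reads the next 'i' bits from the packed input, refilling the bit window one byte at a time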
+ int j;
+ do {
+ if (entry.anInt2232 >= i) {
+ int k = entry.anInt2207 >> entry.anInt2232 - i & (1 << i) - 1;
+ entry.anInt2232 -= i;
+ j = k;
+ break;
+ }
+ entry.anInt2207 = entry.anInt2207 << 8
+ | entry.aByteArray2224[entry.anInt2209] & 0xff;
+ entry.anInt2232 += 8;
+ entry.anInt2209++;
+ entry.anInt2217++;
+ } while (true);
+ return j;
+ }
+
+ public static void clearBlockEntryInstance() {
+ entryInstance = null;
+ }
+
+ private static final void method1793(BZip2BlockEntry entry) {
+ // unused
+ /*
+ * boolean flag = false; boolean flag1 = false; boolean flag2 = false;
+ * boolean flag3 = false; boolean flag4 = false; boolean flag5 = false;
+ * boolean flag6 = false; boolean flag7 = false; boolean flag8 = false;
+ * boolean flag9 = false; boolean flag10 = false; boolean flag11 =
+ * false; boolean flag12 = false; boolean flag13 = false; boolean flag14
+ * = false; boolean flag15 = false; boolean flag16 = false; boolean
+ * flag17 = false;
+ */
+ int j8 = 0;
+ int ai[] = null;
+ int ai1[] = null;
+ int ai2[] = null;
+ entry.anInt2202 = 1;
+ if (anIntArray257 == null) {
+ anIntArray257 = new int[entry.anInt2202 * 0x186a0];
+ }
+ boolean flag18 = true;
+ while (flag18) {
+ byte byte0 = method1789(entry);
+ if (byte0 == 23) {
+ return;
+ }
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1789(entry);
+ byte0 = method1788(entry);
+ entry.anInt2223 = 0;
+ byte0 = method1789(entry);
+ entry.anInt2223 = entry.anInt2223 << 8 | byte0 & 0xff;
+ byte0 = method1789(entry);
+ entry.anInt2223 = entry.anInt2223 << 8 | byte0 & 0xff;
+ byte0 = method1789(entry);
+ entry.anInt2223 = entry.anInt2223 << 8 | byte0 & 0xff;
+ for (int j = 0; j < 16; j++) {
+ byte byte1 = method1788(entry);
+ if (byte1 == 1) {
+ entry.aBooleanArray2205[j] = true;
+ } else {
+ entry.aBooleanArray2205[j] = false;
+ }
+ }
+
+ for (int k = 0; k < 256; k++) {
+ entry.aBooleanArray2213[k] = false;
+ }
+
+ for (int l = 0; l < 16; l++) {
+ if (entry.aBooleanArray2205[l]) {
+ for (int i3 = 0; i3 < 16; i3++) {
+ byte byte2 = method1788(entry);
+ if (byte2 == 1) {
+ entry.aBooleanArray2213[l * 16 + i3] = true;
+ }
+ }
+
+ }
+ }
+
+ method1785(entry);
+ int i4 = entry.anInt2215 + 2;
+ int j4 = method1790(3, entry);
+ int k4 = method1790(15, entry);
+ for (int i1 = 0; i1 < k4; i1++) {
+ int j3 = 0;
+ do {
+ byte byte3 = method1788(entry);
+ if (byte3 == 0) {
+ break;
+ }
+ j3++;
+ } while (true);
+ entry.aByteArray2214[i1] = (byte) j3;
+ }
+
+ byte abyte0[] = new byte[6];
+ for (byte byte16 = 0; byte16 < j4; byte16++) {
+ abyte0[byte16] = byte16;
+ }
+
+ for (int j1 = 0; j1 < k4; j1++) {
+ byte byte17 = entry.aByteArray2214[j1];
+ byte byte15 = abyte0[byte17];
+ for (; byte17 > 0; byte17--) {
+ abyte0[byte17] = abyte0[byte17 - 1];
+ }
+
+ abyte0[0] = byte15;
+ entry.aByteArray2219[j1] = byte15;
+ }
+
+ for (int k3 = 0; k3 < j4; k3++) {
+ int k6 = method1790(5, entry);
+ for (int k1 = 0; k1 < i4; k1++) {
+ do {
+ byte byte4 = method1788(entry);
+ if (byte4 == 0) {
+ break;
+ }
+ byte4 = method1788(entry);
+ if (byte4 == 0) {
+ k6++;
+ } else {
+ k6--;
+ }
+ } while (true);
+ entry.aByteArrayArray2229[k3][k1] = (byte) k6;
+ }
+
+ }
+
+ for (int l3 = 0; l3 < j4; l3++) {
+ byte byte8 = 32;
+ int i = 0;
+ for (int l1 = 0; l1 < i4; l1++) {
+ if (entry.aByteArrayArray2229[l3][l1] > i) {
+ i = entry.aByteArrayArray2229[l3][l1];
+ }
+ if (entry.aByteArrayArray2229[l3][l1] < byte8) {
+ byte8 = entry.aByteArrayArray2229[l3][l1];
+ }
+ }
+
+ method1786(entry.anIntArrayArray2230[l3],
+ entry.anIntArrayArray2218[l3],
+ entry.anIntArrayArray2210[l3],
+ entry.aByteArrayArray2229[l3], byte8, i, i4);
+ entry.anIntArray2200[l3] = byte8;
+ }
+
+ int l4 = entry.anInt2215 + 1;
+ int i5 = -1;
+ int j5 = 0;
+ for (int i2 = 0; i2 <= 255; i2++) {
+ entry.anIntArray2228[i2] = 0;
+ }
+
+ int i9 = 4095;
+ for (int k8 = 15; k8 >= 0; k8--) {
+ for (int l8 = 15; l8 >= 0; l8--) {
+ entry.aByteArray2204[i9] = (byte) (k8 * 16 + l8);
+ i9--;
+ }
+
+ entry.anIntArray2226[k8] = i9 + 1;
+ }
+
+ int l5 = 0;
+ if (j5 == 0) {
+ i5++;
+ j5 = 50;
+ byte byte12 = entry.aByteArray2219[i5];
+ j8 = entry.anIntArray2200[byte12];
+ ai = entry.anIntArrayArray2230[byte12];
+ ai2 = entry.anIntArrayArray2210[byte12];
+ ai1 = entry.anIntArrayArray2218[byte12];
+ }
+ j5--;
+ int l6 = j8;
+ int k7;
+ byte byte9;
+ for (k7 = method1790(l6, entry); k7 > ai[l6]; k7 = k7 << 1 | byte9) {
+ l6++;
+ byte9 = method1788(entry);
+ }
+
+ for (int k5 = ai2[k7 - ai1[l6]]; k5 != l4;) {
+ if (k5 == 0 || k5 == 1) {
+ int i6 = -1;
+ int j6 = 1;
+ do {
+ if (k5 == 0) {
+ i6 += j6;
+ } else if (k5 == 1) {
+ i6 += 2 * j6;
+ }
+ j6 *= 2;
+ if (j5 == 0) {
+ i5++;
+ j5 = 50;
+ byte byte13 = entry.aByteArray2219[i5];
+ j8 = entry.anIntArray2200[byte13];
+ ai = entry.anIntArrayArray2230[byte13];
+ ai2 = entry.anIntArrayArray2210[byte13];
+ ai1 = entry.anIntArrayArray2218[byte13];
+ }
+ j5--;
+ int i7 = j8;
+ int l7;
+ byte byte10;
+ for (l7 = method1790(i7, entry); l7 > ai[i7]; l7 = l7 << 1
+ | byte10) {
+ i7++;
+ byte10 = method1788(entry);
+ }
+
+ k5 = ai2[l7 - ai1[i7]];
+ } while (k5 == 0 || k5 == 1);
+ i6++;
+ byte byte5 = entry.aByteArray2211[entry.aByteArray2204[entry.anIntArray2226[0]] & 0xff];
+ entry.anIntArray2228[byte5 & 0xff] += i6;
+ for (; i6 > 0; i6--) {
+ anIntArray257[l5] = byte5 & 0xff;
+ l5++;
+ }
+
+ } else {
+ int i11 = k5 - 1;
+ byte byte6;
+ if (i11 < 16) {
+ int i10 = entry.anIntArray2226[0];
+ byte6 = entry.aByteArray2204[i10 + i11];
+ for (; i11 > 3; i11 -= 4) {
+ int j11 = i10 + i11;
+ entry.aByteArray2204[j11] = entry.aByteArray2204[j11 - 1];
+ entry.aByteArray2204[j11 - 1] = entry.aByteArray2204[j11 - 2];
+ entry.aByteArray2204[j11 - 2] = entry.aByteArray2204[j11 - 3];
+ entry.aByteArray2204[j11 - 3] = entry.aByteArray2204[j11 - 4];
+ }
+
+ for (; i11 > 0; i11--) {
+ entry.aByteArray2204[i10 + i11] = entry.aByteArray2204[(i10 + i11) - 1];
+ }
+
+ entry.aByteArray2204[i10] = byte6;
+ } else {
+ int k10 = i11 / 16;
+ int l10 = i11 % 16;
+ int j10 = entry.anIntArray2226[k10] + l10;
+ byte6 = entry.aByteArray2204[j10];
+ for (; j10 > entry.anIntArray2226[k10]; j10--) {
+ entry.aByteArray2204[j10] = entry.aByteArray2204[j10 - 1];
+ }
+
+ entry.anIntArray2226[k10]++;
+ for (; k10 > 0; k10--) {
+ entry.anIntArray2226[k10]--;
+ entry.aByteArray2204[entry.anIntArray2226[k10]] = entry.aByteArray2204[(entry.anIntArray2226[k10 - 1] + 16) - 1];
+ }
+
+ entry.anIntArray2226[0]--;
+ entry.aByteArray2204[entry.anIntArray2226[0]] = byte6;
+ if (entry.anIntArray2226[0] == 0) {
+ int l9 = 4095;
+ for (int j9 = 15; j9 >= 0; j9--) {
+ for (int k9 = 15; k9 >= 0; k9--) {
+ entry.aByteArray2204[l9] = entry.aByteArray2204[entry.anIntArray2226[j9]
+ + k9];
+ l9--;
+ }
+
+ entry.anIntArray2226[j9] = l9 + 1;
+ }
+
+ }
+ }
+ entry.anIntArray2228[entry.aByteArray2211[byte6 & 0xff] & 0xff]++;
+ anIntArray257[l5] = entry.aByteArray2211[byte6 & 0xff] & 0xff;
+ l5++;
+ if (j5 == 0) {
+ i5++;
+ j5 = 50;
+ byte byte14 = entry.aByteArray2219[i5];
+ j8 = entry.anIntArray2200[byte14];
+ ai = entry.anIntArrayArray2230[byte14];
+ ai2 = entry.anIntArrayArray2210[byte14];
+ ai1 = entry.anIntArrayArray2218[byte14];
+ }
+ j5--;
+ int j7 = j8;
+ int i8;
+ byte byte11;
+ for (i8 = method1790(j7, entry); i8 > ai[j7]; i8 = i8 << 1
+ | byte11) {
+ j7++;
+ byte11 = method1788(entry);
+ }
+
+ k5 = ai2[i8 - ai1[j7]];
+ }
+ }
+
+ entry.anInt2222 = 0;
+ entry.aByte2201 = 0;
+ entry.anIntArray2220[0] = 0;
+ for (int j2 = 1; j2 <= 256; j2++) {
+ entry.anIntArray2220[j2] = entry.anIntArray2228[j2 - 1];
+ }
+
+ for (int k2 = 1; k2 <= 256; k2++) {
+ entry.anIntArray2220[k2] += entry.anIntArray2220[k2 - 1];
+ }
+
+ for (int l2 = 0; l2 < l5; l2++) {
+ byte byte7 = (byte) (anIntArray257[l2] & 0xff);
+ anIntArray257[entry.anIntArray2220[byte7 & 0xff]] |= l2 << 8;
+ entry.anIntArray2220[byte7 & 0xff]++;
+ }
+
+ entry.anInt2208 = anIntArray257[entry.anInt2223] >> 8;
+ entry.anInt2227 = 0;
+ entry.anInt2208 = anIntArray257[entry.anInt2208];
+ entry.anInt2221 = (byte) (entry.anInt2208 & 0xff);
+ entry.anInt2208 >>= 8;
+ entry.anInt2227++;
+ entry.anInt2225 = l5;
+ method1787(entry);
+ if (entry.anInt2227 == entry.anInt2225 + 1 && entry.anInt2222 == 0) {
+ flag18 = true;
+ } else {
+ flag18 = false;
+ }
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/crc32/CRC32HGenerator.java b/Tools/Cache Editor/src/com/alex/util/crc32/CRC32HGenerator.java
new file mode 100644
index 000000000..e38e49ea9
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/crc32/CRC32HGenerator.java
@@ -0,0 +1,26 @@
+package com.alex.util.crc32;
+
+import java.util.zip.CRC32;
+
+public final class CRC32HGenerator {
+
+ public static final CRC32 CRC32Instance = new CRC32();
+
+ public static int getHash(byte[] data) {
+ return getHash(data, 0, data.length);
+ }
+
+ public static int getHash(byte[] data, int offset, int length) {
+ synchronized(CRC32Instance) {
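+ // CRC32 keeps internal state, so the shared instance is synchronized and reset after every hash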
+ CRC32Instance.update(data, offset, length);
+ int hash = (int) CRC32Instance.getValue();
+ CRC32Instance.reset();
+ return hash;
+ }
+ }
+
+
+ private CRC32HGenerator() {
+
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/gzip/GZipCompressor.java b/Tools/Cache Editor/src/com/alex/util/gzip/GZipCompressor.java
new file mode 100644
index 000000000..68bad73c7
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/gzip/GZipCompressor.java
@@ -0,0 +1,22 @@
+package com.alex.util.gzip;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.zip.GZIPOutputStream;
+
+public class GZipCompressor {
+
+ public static final byte[] compress(byte[] data) {
+ ByteArrayOutputStream compressedBytes = new ByteArrayOutputStream();
+ try {
+ GZIPOutputStream out = new GZIPOutputStream(compressedBytes);
+ out.write(data);
+ out.finish();
+ out.close();
+ return compressedBytes.toByteArray();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/gzip/GZipDecompressor.java b/Tools/Cache Editor/src/com/alex/util/gzip/GZipDecompressor.java
new file mode 100644
index 000000000..96260f490
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/gzip/GZipDecompressor.java
@@ -0,0 +1,48 @@
+package com.alex.util.gzip;
+
+import java.util.zip.Inflater;
+
+import com.alex.io.Stream;
+
+public class GZipDecompressor {
+
+ private static final Inflater inflaterInstance = new Inflater(true);
+
+ public static final boolean decompress(Stream stream, byte data[]) {
+ synchronized(inflaterInstance) {
+ if (stream.getBuffer()[stream.getOffset()] != 31 || stream.getBuffer()[stream.getOffset() + 1] != -117)
+ return false;
+ //throw new RuntimeException("Invalid GZIP header!");
+ try {
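+ // skip the 10-byte gzip header; the trailing 8 bytes (CRC32 + size) are excluded from the input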
+ inflaterInstance.setInput(stream.getBuffer(), stream.getOffset() + 10, -stream.getOffset() - 18 + stream.getBuffer().length);
+ inflaterInstance.inflate(data);
+ } catch (Exception e) {
+ inflaterInstance.reset();
+ return false;
+ //throw new RuntimeException("Invalid GZIP compressed data!");
+ }
+ inflaterInstance.reset();
+ return true;
+ }
+ }
+
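+ /*
+ * Note: the parameter names are misleading; 'data' holds the gzip-compressed
+ * input and 'compressed' receives the decompressed output.
+ */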
+ public static final boolean decompress(byte[] compressed, byte data[], int offset, int length) {
+ synchronized(inflaterInstance) {
+ if (data[offset] != 31 || data[offset + 1] != -117)
+ return false;
+ //throw new RuntimeException("Invalid GZIP header!");
+ try {
+ inflaterInstance.setInput(data, offset + 10, -offset - 18 + length);
+ inflaterInstance.inflate(compressed);
+ } catch (Exception e) {
+ inflaterInstance.reset();
+ e.printStackTrace();
+ return false;
+ //throw new RuntimeException("Invalid GZIP compressed data!");
+ }
+ inflaterInstance.reset();
+ return true;
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/com/alex/util/whirlpool/Whirlpool.java b/Tools/Cache Editor/src/com/alex/util/whirlpool/Whirlpool.java
new file mode 100644
index 000000000..86cce2df0
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/util/whirlpool/Whirlpool.java
@@ -0,0 +1,415 @@
+package com.alex.util.whirlpool;
+
+import java.util.Arrays;
+
+/**
+ * The Whirlpool hashing function.
+ *
+ * References:
+ *
+ * The Whirlpool algorithm was developed by Paulo S. L. M. Barreto and
+ * Vincent Rijmen.
+ *
+ * See: P.S.L.M. Barreto, V. Rijmen, "The Whirlpool hashing function,"
+ * First NESSIE workshop, 2000 (tweaked version, 2003).
+ *
+ * @author Paulo S.L.M. Barreto
+ * @author Vincent Rijmen.
+ *
+ * @version 3.0 (2003.03.12)
+ *
+ * =============================================================================
+ *
+ * Differences from version 2.1:
+ *
+ * - Suboptimal diffusion matrix replaced by cir(1, 1, 4, 1, 8, 5, 2, 9).
+ *
+ * =============================================================================
+ *
+ * Differences from version 2.0:
+ *
+ * - Generation of ISO/IEC 10118-3 test vectors.
+ * - Bug fix: nonzero carry was ignored when tallying the data length
+ * (this bug apparently only manifested itself when feeding data
+ * in pieces rather than in a single chunk at once).
+ *
+ * Differences from version 1.0:
+ *
+ * - Original S-box replaced by the tweaked, hardware-efficient version.
+ *
+ * =============================================================================
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+public class Whirlpool {
+
+ /**
+ * The message digest size (in bits)
+ */
+ public static final int DIGESTBITS = 512;
+
+ /**
+ * The message digest size (in bytes)
+ */
+ public static final int DIGESTBYTES = DIGESTBITS >>> 3;
+
+ /**
+ * The number of rounds of the internal dedicated block cipher.
+ */
+ protected static final int R = 10;
+
+ /**
+ * The substitution box.
+ */
+ private static final String sbox =
+ "\u1823\uc6E8\u87B8\u014F\u36A6\ud2F5\u796F\u9152" +
+ "\u60Bc\u9B8E\uA30c\u7B35\u1dE0\ud7c2\u2E4B\uFE57" +
+ "\u1577\u37E5\u9FF0\u4AdA\u58c9\u290A\uB1A0\u6B85" +
+ "\uBd5d\u10F4\ucB3E\u0567\uE427\u418B\uA77d\u95d8" +
+ "\uFBEE\u7c66\udd17\u479E\ucA2d\uBF07\uAd5A\u8333" +
+ "\u6302\uAA71\uc819\u49d9\uF2E3\u5B88\u9A26\u32B0" +
+ "\uE90F\ud580\uBEcd\u3448\uFF7A\u905F\u2068\u1AAE" +
+ "\uB454\u9322\u64F1\u7312\u4008\uc3Ec\udBA1\u8d3d" +
+ "\u9700\ucF2B\u7682\ud61B\uB5AF\u6A50\u45F3\u30EF" +
+ "\u3F55\uA2EA\u65BA\u2Fc0\udE1c\uFd4d\u9275\u068A" +
+ "\uB2E6\u0E1F\u62d4\uA896\uF9c5\u2559\u8472\u394c" +
+ "\u5E78\u388c\ud1A5\uE261\uB321\u9c1E\u43c7\uFc04" +
+ "\u5199\u6d0d\uFAdF\u7E24\u3BAB\ucE11\u8F4E\uB7EB" +
+ "\u3c81\u94F7\uB913\u2cd3\uE76E\uc403\u5644\u7FA9" +
+ "\u2ABB\uc153\udc0B\u9d6c\u3174\uF646\uAc89\u14E1" +
+ "\u163A\u6909\u70B6\ud0Ed\ucc42\u98A4\u285c\uF886";
+
+ private static long[][] C = new long[8][256];
+ private static long[] rc = new long[R + 1];
+
+ static {
+ for (int x = 0; x < 256; x++) {
+ char c = sbox.charAt(x/2);
+ long v1 = ((x & 1) == 0) ? c >>> 8 : c & 0xff;
+ long v2 = v1 << 1;
+ if (v2 >= 0x100L) {
+ v2 ^= 0x11dL;
+ }
+ long v4 = v2 << 1;
+ if (v4 >= 0x100L) {
+ v4 ^= 0x11dL;
+ }
+ long v5 = v4 ^ v1;
+ long v8 = v4 << 1;
+ if (v8 >= 0x100L) {
+ v8 ^= 0x11dL;
+ }
+ long v9 = v8 ^ v1;
+ /*
+ * build the circulant table C[0][x] = S[x].[1, 1, 4, 1, 8, 5, 2, 9]:
+ */
+ C[0][x] =
+ (v1 << 56) | (v1 << 48) | (v4 << 40) | (v1 << 32) |
+ (v8 << 24) | (v5 << 16) | (v2 << 8) | (v9 );
+ /*
+ * build the remaining circulant tables C[t][x] = C[0][x] rotr t
+ */
+ for (int t = 1; t < 8; t++) {
+ C[t][x] = (C[t - 1][x] >>> 8) | ((C[t - 1][x] << 56));
+ }
+ }
+
+ /*
+ * build the round constants:
+ */
+ rc[0] = 0L; /* not used (assignment kept only to properly initialize all variables) */
+ for (int r = 1; r <= R; r++) {
+ int i = 8*(r - 1);
+ rc[r] =
+ (C[0][i ] & 0xff00000000000000L) ^
+ (C[1][i + 1] & 0x00ff000000000000L) ^
+ (C[2][i + 2] & 0x0000ff0000000000L) ^
+ (C[3][i + 3] & 0x000000ff00000000L) ^
+ (C[4][i + 4] & 0x00000000ff000000L) ^
+ (C[5][i + 5] & 0x0000000000ff0000L) ^
+ (C[6][i + 6] & 0x000000000000ff00L) ^
+ (C[7][i + 7] & 0x00000000000000ffL);
+ }
+ }
+
+ public static byte[] getHash(byte[] data, int off, int len) {
+ byte source[];
+ if(off <= 0) {
+ source = data;
+ } else {
+ source = new byte[len];
+ for(int i = 0; i < len; i++)
+ source[i] = data[off + i];
+ }
+ Whirlpool whirlpool = new Whirlpool();
+ whirlpool.NESSIEinit();
+ whirlpool.NESSIEadd(source, len * 8);
+ byte digest[] = new byte[64];
+ whirlpool.NESSIEfinalize(digest);
+ return digest;
+ }
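+ // usage sketch (hypothetical caller): byte[] digest = Whirlpool.getHash(fileData, 0, fileData.length);
+ // returns the 64-byte (512-bit) Whirlpool digest of the given range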
+
+ /**
+ * Global number of hashed bits (256-bit counter).
+ */
+ protected byte[] bitLength = new byte[32];
+
+ /**
+ * Buffer of data to hash.
+ */
+ protected byte[] buffer = new byte[64];
+
+ /**
+ * Current number of bits on the buffer.
+ */
+ protected int bufferBits = 0;
+
+ /**
+ * Current (possibly incomplete) byte slot on the buffer.
+ */
+ protected int bufferPos = 0;
+
+ /**
+ * The hashing state.
+ */
+ protected long[] hash = new long[8];
+ protected long[] K = new long[8]; // the round key
+ protected long[] L = new long[8];
+ protected long[] block = new long[8]; // mu(buffer)
+ protected long[] state = new long[8]; // the cipher state
+
+ public Whirlpool() {
+ }
+
+ /**
+ * The core Whirlpool transform.
+ */
+ protected void processBuffer() {
+ /*
+ * map the buffer to a block:
+ */
+ for (int i = 0, j = 0; i < 8; i++, j += 8) {
+ block[i] =
+ (((long)buffer[j ] ) << 56) ^
+ (((long)buffer[j + 1] & 0xffL) << 48) ^
+ (((long)buffer[j + 2] & 0xffL) << 40) ^
+ (((long)buffer[j + 3] & 0xffL) << 32) ^
+ (((long)buffer[j + 4] & 0xffL) << 24) ^
+ (((long)buffer[j + 5] & 0xffL) << 16) ^
+ (((long)buffer[j + 6] & 0xffL) << 8) ^
+ (((long)buffer[j + 7] & 0xffL) );
+ }
+ /*
+ * compute and apply K^0 to the cipher state:
+ */
+ for (int i = 0; i < 8; i++) {
+ state[i] = block[i] ^ (K[i] = hash[i]);
+ }
+ /*
+ * iterate over all rounds:
+ */
+ for (int r = 1; r <= R; r++) {
+ /*
+ * compute K^r from K^{r-1}:
+ */
+ for (int i = 0; i < 8; i++) {
+ L[i] = 0L;
+ for (int t = 0, s = 56; t < 8; t++, s -= 8) {
+ L[i] ^= C[t][(int)(K[(i - t) & 7] >>> s) & 0xff];
+ }
+ }
+ for (int i = 0; i < 8; i++) {
+ K[i] = L[i];
+ }
+ K[0] ^= rc[r];
+ /*
+ * apply the r-th round transformation:
+ */
+ for (int i = 0; i < 8; i++) {
+ L[i] = K[i];
+ for (int t = 0, s = 56; t < 8; t++, s -= 8) {
+ L[i] ^= C[t][(int)(state[(i - t) & 7] >>> s) & 0xff];
+ }
+ }
+ for (int i = 0; i < 8; i++) {
+ state[i] = L[i];
+ }
+ }
+ /*
+ * apply the Miyaguchi-Preneel compression function:
+ */
+ for (int i = 0; i < 8; i++) {
+ hash[i] ^= state[i] ^ block[i];
+ }
+ }
+
+ /**
+ * Initialize the hashing state.
+ */
+ public void NESSIEinit() {
+ Arrays.fill(bitLength, (byte)0);
+ bufferBits = bufferPos = 0;
+ buffer[0] = 0; // it's only necessary to cleanup buffer[bufferPos].
+ Arrays.fill(hash, 0L); // initial value
+ }
+
+ /**
+ * Delivers input data to the hashing algorithm.
+ *
+ * @param source plaintext data to hash.
+ * @param sourceBits how many bits of plaintext to process.
+ *
+ * This method maintains the invariant: bufferBits < 512
+ */
+ public void NESSIEadd(byte[] source, long sourceBits) {
+ /*
+ sourcePos
+ |
+ +-------+-------+-------
+ ||||||||||||||||||||| source
+ +-------+-------+-------
+ +-------+-------+-------+-------+-------+-------
+ |||||||||||||||||||||| buffer
+ +-------+-------+-------+-------+-------+-------
+ |
+ bufferPos
+ */
+ int sourcePos = 0; // index of leftmost source byte containing data (1 to 8 bits).
+ int sourceGap = (8 - ((int)sourceBits & 7)) & 7; // space on source[sourcePos].
+ int bufferRem = bufferBits & 7; // occupied bits on buffer[bufferPos].
+ int b;
+ // tally the length of the added data:
+ long value = sourceBits;
+ for (int i = 31, carry = 0; i >= 0; i--) {
+ carry += (bitLength[i] & 0xff) + ((int)value & 0xff);
+ bitLength[i] = (byte)carry;
+ carry >>>= 8;
+ value >>>= 8;
+ }
+ // process data in chunks of 8 bits:
+ while (sourceBits > 8) { // at least source[sourcePos] and source[sourcePos+1] contain data.
+ // take a byte from the source:
+ b = ((source[sourcePos] << sourceGap) & 0xff) |
+ ((source[sourcePos + 1] & 0xff) >>> (8 - sourceGap));
+ if (b < 0 || b >= 256) {
+ throw new RuntimeException("LOGIC ERROR");
+ }
+ // process this byte:
+ buffer[bufferPos++] |= b >>> bufferRem;
+ bufferBits += 8 - bufferRem; // bufferBits = 8*bufferPos;
+ if (bufferBits == 512) {
+ // process data block:
+ processBuffer();
+ // reset buffer:
+ bufferBits = bufferPos = 0;
+ }
+ buffer[bufferPos] = (byte)((b << (8 - bufferRem)) & 0xff);
+ bufferBits += bufferRem;
+ // proceed to remaining data:
+ sourceBits -= 8;
+ sourcePos++;
+ }
+ // now 0 <= sourceBits <= 8;
+ // furthermore, all data (if any is left) is in source[sourcePos].
+ if (sourceBits > 0) {
+ b = (source[sourcePos] << sourceGap) & 0xff; // bits are left-justified on b.
+ // process the remaining bits:
+ buffer[bufferPos] |= b >>> bufferRem;
+ } else {
+ b = 0;
+ }
+ if (bufferRem + sourceBits < 8) {
+ // all remaining data fits on buffer[bufferPos], and there still remains some space.
+ bufferBits += sourceBits;
+ } else {
+ // buffer[bufferPos] is full:
+ bufferPos++;
+ bufferBits += 8 - bufferRem; // bufferBits = 8*bufferPos;
+ sourceBits -= 8 - bufferRem;
+ // now 0 <= sourceBits < 8; furthermore, all data is in source[sourcePos].
+ if (bufferBits == 512) {
+ // process data block:
+ processBuffer();
+ // reset buffer:
+ bufferBits = bufferPos = 0;
+ }
+ buffer[bufferPos] = (byte)((b << (8 - bufferRem)) & 0xff);
+ bufferBits += (int)sourceBits;
+ }
+ }
+
+ /**
+ * Get the hash value from the hashing state.
+ *
+ * This method uses the invariant: bufferBits < 512
+ */
+ public void NESSIEfinalize(byte[] digest) {
+ // append a '1'-bit:
+ buffer[bufferPos] |= 0x80 >>> (bufferBits & 7);
+ bufferPos++; // all remaining bits on the current byte are set to zero.
+ // pad with zero bits to complete 512N + 256 bits:
+ if (bufferPos > 32) {
+ while (bufferPos < 64) {
+ buffer[bufferPos++] = 0;
+ }
+ // process data block:
+ processBuffer();
+ // reset buffer:
+ bufferPos = 0;
+ }
+ while (bufferPos < 32) {
+ buffer[bufferPos++] = 0;
+ }
+ // append bit length of hashed data:
+ System.arraycopy(bitLength, 0, buffer, 32, 32);
+ // process data block:
+ processBuffer();
+ // return the completed message digest:
+ for (int i = 0, j = 0; i < 8; i++, j += 8) {
+ long h = hash[i];
+ digest[j ] = (byte)(h >>> 56);
+ digest[j + 1] = (byte)(h >>> 48);
+ digest[j + 2] = (byte)(h >>> 40);
+ digest[j + 3] = (byte)(h >>> 32);
+ digest[j + 4] = (byte)(h >>> 24);
+ digest[j + 5] = (byte)(h >>> 16);
+ digest[j + 6] = (byte)(h >>> 8);
+ digest[j + 7] = (byte)(h );
+ }
+ }
+
+ /**
+ * Delivers string input data to the hashing algorithm.
+ *
+ * @param source plaintext data to hash (ASCII text string).
+ *
+ * This method maintains the invariant: bufferBits < 512
+ */
+ public void NESSIEadd(String source) {
+ if (source.length() > 0) {
+ byte[] data = new byte[source.length()];
+ for (int i = 0; i < source.length(); i++) {
+ data[i] = (byte)source.charAt(i);
+ }
+ NESSIEadd(data, 8 * data.length);
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/com/alex/utils/ByteBufferUtils.java b/Tools/Cache Editor/src/com/alex/utils/ByteBufferUtils.java
new file mode 100644
index 000000000..e04aa5797
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/utils/ByteBufferUtils.java
@@ -0,0 +1,153 @@
+package com.alex.utils;
+
+import java.nio.ByteBuffer;
+
+
+/**
+ * Holds utility methods for reading/writing a byte buffer.
+ * @author Emperor
+ *
+ */
+public final class ByteBufferUtils {
+
+ /**
+ * Gets a string from the byte buffer.
+ * @param buffer The byte buffer.
+ * @return The string.
+ */
+ public static String getString(ByteBuffer buffer) {
+ StringBuilder sb = new StringBuilder();
+ byte b;
+ while ((b = buffer.get()) != 0) {
+ sb.append((char) b);
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Puts a string on the byte buffer.
+ * @param s The string to put.
+ * @param buffer The byte buffer.
+ */
+ public static void putString(String s, ByteBuffer buffer) {
+ buffer.put(s.getBytes()).put((byte) 0);
+ }
+
+ /**
+ * Puts a string on the byte buffer using the packed (modified UTF-8) "GJ2" format.
+ * @param s The string to put.
+ * @param buffer The byte buffer.
+ * @return The byte buffer, for chaining.
+ */
+ public static ByteBuffer putGJ2String(String s, ByteBuffer buffer) {
+ byte[] packed = new byte[256];
+ int length = packGJString2(0, packed, s);
+ return buffer.put((byte) 0).put(packed, 0, length).put((byte) 0);
+ }
+
+ /**
+ * Decodes the XTEA encryption.
+ * @param keys The keys.
+ * @param start The start index.
+ * @param end The end index.
+ * @param buffer The byte buffer.
+ */
+ public static void decodeXTEA(int[] keys, int start, int end, ByteBuffer buffer) {
+ int l = buffer.position();
+ buffer.position(start);
+ int length = (end - start) / 8;
+ for (int i = 0; i < length; i++) {
+ int firstInt = buffer.getInt();
+ int secondInt = buffer.getInt();
+ int sum = 0xc6ef3720;
+ int delta = 0x9e3779b9;
+ for (int j = 32; j-- > 0;) {
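+ // (sum & 0x1c84) >>> 11 below is an obfuscated form of (sum >>> 11) & 3, the standard XTEA key index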
+ secondInt -= keys[(sum & 0x1c84) >>> 11] + sum ^ (firstInt >>> 5 ^ firstInt << 4) + firstInt;
+ sum -= delta;
+ firstInt -= (secondInt >>> 5 ^ secondInt << 4) + secondInt ^ keys[sum & 3] + sum;
+ }
+ buffer.position(buffer.position() - 8);
+ buffer.putInt(firstInt);
+ buffer.putInt(secondInt);
+ }
+ buffer.position(l);
+ }
+
+ /**
+ * Packs a string into the buffer using a modified UTF-8 encoding.
+ *
+ * @param position
+ * The position to start writing at.
+ * @param buffer
+ * The destination buffer.
+ * @param string
+ * The String to pack.
+ * @return The number of bytes written.
+ */
+ public static int packGJString2(int position, byte[] buffer, String string) {
+ int length = string.length();
+ int offset = position;
+ for (int i = 0; length > i; i++) {
+ int character = string.charAt(i);
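+ // the magic constants below just OR in the UTF-8 lead-byte markers: (c | 919275) >> 12 == 0xE0 | (c >> 12), and (c | 12309) >> 6 == 0xC0 | (c >> 6)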
+ if (character > 127) {
+ if (character > 2047) {
+ buffer[offset++] = (byte) ((character | 919275) >> 12);
+ buffer[offset++] = (byte) (128 | ((character >> 6) & 63));
+ buffer[offset++] = (byte) (128 | (character & 63));
+ } else {
+ buffer[offset++] = (byte) ((character | 12309) >> 6);
+ buffer[offset++] = (byte) (128 | (character & 63));
+ }
+ } else
+ buffer[offset++] = (byte) character;
+ }
+ return offset - position;
+ }
+
+ /**
+ * Gets a tri-byte from the buffer.
+ * @param buffer The buffer.
+ * @return The value.
+ */
+ public static int getTriByte(ByteBuffer buffer) {
+ return ((buffer.get() & 0xFF) << 16) + ((buffer.get() & 0xFF) << 8) + (buffer.get() & 0xFF);
+ }
+
+ /**
+ * Gets a smart (one byte if the value is below 128, otherwise two bytes) from the buffer.
+ * @param buffer The buffer.
+ * @return The value.
+ */
+ public static int getSmart(ByteBuffer buffer) {
+ int peek = buffer.get() & 0xFF;
+ if (peek <= Byte.MAX_VALUE) {
+ return peek;
+ }
+ return ((peek << 8) | (buffer.get() & 0xFF)) - 32768;
+ }
+
+ /**
+ * Gets a big smart (consecutive smarts accumulated while each reads 32767) from the buffer.
+ * @param buffer The buffer.
+ * @return The value.
+ */
+ public static int getBigSmart(ByteBuffer buffer) {
+ int value = 0;
+ int current = getSmart(buffer);
+ while (current == 32767) {
+ current = getSmart(buffer);
+ value += 32767;
+ }
+ value += current;
+ return value;
+ }
+
+ /**
+ * Constructs a new {@code ByteBufferUtils} {@code Object}.
+ */
+ private ByteBufferUtils() {
+ /*
+ * empty.
+ */
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/com/alex/utils/Constants.java b/Tools/Cache Editor/src/com/alex/utils/Constants.java
new file mode 100644
index 000000000..2c5558e2a
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/utils/Constants.java
@@ -0,0 +1,26 @@
+package com.alex.utils;
+
+public final class Constants {
+
+ public static final int NO_COMPRESSION = 0;
+ public static final int BZIP2_COMPRESSION = 1;
+ public static final int GZIP_COMPRESSION = 2;
+
+ public static final int MAX_VALID_ARCHIVE_LENGTH = 1000000;
+
+ public static final int INTERFACE_DEFINITIONS_INDEX = 3;
+ public static final int MAPS_INDEX = 5;
+ public static final int MODELS_INDEX = 7;
+ public static final int SPRITES_INDEX = 8;
+ public static final int INDEXED_IMAGES_INDEX = 8;
+ public static final int OBJECTS_DEFINITIONS_INDEX = 18;
+ public static final int ITEM_DEFINITIONS_INDEX = 19;
+ public static final int LOADER_IMAGES_INDEX = 32;
+ public static final int LOADER_INDEXED_IMAGES_INDEX = 34;
+ public static final int CLIENT_BUILD = 718;
+ public static final boolean ENCRYPTED_CACHE = true;
+
+ private Constants() {
+
+ }
+}
diff --git a/Tools/Cache Editor/src/com/alex/utils/Utils.java b/Tools/Cache Editor/src/com/alex/utils/Utils.java
new file mode 100644
index 000000000..afa5cf9ad
--- /dev/null
+++ b/Tools/Cache Editor/src/com/alex/utils/Utils.java
@@ -0,0 +1,62 @@
+package com.alex.utils;
+
+import java.math.BigInteger;
+
+import com.alex.io.OutputStream;
+import com.alex.store.Store;
+
+public final class Utils {
+
+ public static byte[] cryptRSA(byte[] data, BigInteger exponent, BigInteger modulus) {
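+ // RSA here is plain modular exponentiation: interpret the bytes as a signed big integer and raise it to 'exponent' mod 'modulus'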
+ return new BigInteger(data).modPow(exponent, modulus).toByteArray();
+ }
+
+ public static byte[] getArchivePacketData(int indexId, int archiveId,
+ byte[] archive) {
+ OutputStream stream = new OutputStream(archive.length + 4);
+ stream.writeByte(indexId);
+ stream.writeShort(archiveId);
+ stream.writeByte(0); // priority, no compression
+ stream.writeInt(archive.length);
+ int offset = 8;
+ for (int index = 0; index < archive.length; index++) {
+ if (offset == 512) {
+ stream.writeByte(-1);
+ offset = 1;
+ }
+ stream.writeByte(archive[index]);
+ offset++;
+ }
+ byte[] packet = new byte[stream.getOffset()];
+ stream.setOffset(0);
+ stream.getBytes(packet, 0, packet.length);
+ return packet;
+ }
+
+ public static int getNameHash(String name) {
+ return name.toLowerCase().hashCode();
+ }
+
+ public static final int getInterfaceDefinitionsSize(Store store) {
+ return store.getIndexes()[Constants.INTERFACE_DEFINITIONS_INDEX].getLastArchiveId();
+ }
+
+ public static final int getInterfaceDefinitionsComponentsSize(Store store,
+ int interfaceId) {
+ return store.getIndexes()[Constants.INTERFACE_DEFINITIONS_INDEX].getLastFileId(interfaceId);
+ }
+
+ public static final int getItemDefinitionsSize(Store store) {
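+ // item definitions are packed 256 files per archive, so the total is lastArchiveId * 256 plus the files in the last archive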
+ int lastArchiveId = store.getIndexes()[Constants.ITEM_DEFINITIONS_INDEX].getLastArchiveId();
+ return lastArchiveId * 256
+ + store.getIndexes()[Constants.ITEM_DEFINITIONS_INDEX].getValidFilesCount(lastArchiveId);
+ }
+
+ private Utils() {
+
+ }
+
+}
diff --git a/Tools/Cache Editor/src/emperor/DefDumper.java b/Tools/Cache Editor/src/emperor/DefDumper.java
new file mode 100644
index 000000000..158865588
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/DefDumper.java
@@ -0,0 +1,27 @@
+package emperor;
+
+import java.io.BufferedWriter;
+import java.io.FileWriter;
+import java.util.Arrays;
+
+import alex.cache.loaders.ObjectDefinitions;
+
+import com.alex.store.Store;
+
+public class DefDumper {
+
+ public static void main(String...args) throws Throwable {
+ Store store = new Store("./508/");
+ BufferedWriter bw = new BufferedWriter(new FileWriter("./508_object_list.txt"));
+ for (int i = 0; i < 100_000; i++) {
+ ObjectDefinitions def = ObjectDefinitions.initialize(i, store);
+ if (def == null) {
+ continue;
+ }
+ bw.append("definition [id=" + i + ", options=" + Arrays.toString(def.options) + "]");
+ bw.newLine();
+ }
+ bw.flush();
+ bw.close();
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/DonatorIconPacker.java b/Tools/Cache Editor/src/emperor/DonatorIconPacker.java
new file mode 100644
index 000000000..bf9744778
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/DonatorIconPacker.java
@@ -0,0 +1,159 @@
+package emperor;
+
+import java.awt.image.BufferedImage;
+import java.io.File;
+import java.io.IOException;
+
+import javax.imageio.ImageIO;
+
+import com.alex.loaders.images.IndexedColorImageFile;
+import com.alex.store.Store;
+
+/**
+ * Handles the donator icon packing.
+ * @author Vexia
+ *
+ */
+public final class DonatorIconPacker {
+
+ /**
+ * The icons to pack.
+ */
+ private static String[] ICONS = new String[] {"green", "red", "yellow", "blue", "orange", "pink", "purple", "brown", "world_announce", "rainbow", "whip_icon"};
+
+ /**
+ * The path.
+ */
+ private static final String PATH = "./icons";
+
+ /**
+ * The icon dump.
+ */
+ private static final String DUMP_PATH = "./icon_dump";
+
+ /**
+ * The archive id.
+ */
+ private static final int ARCHIVE_ID = 815;
+
+ /**
+ * The starting index.
+ */
+ private static final int START_INDEX = 2;
+
+ /**
+ * The index color image file.
+ */
+ private static IndexedColorImageFile colorFile;
+
+ /**
+ * The store to work with.
+ */
+ private static Store store;
+
+ /**
+ * Runs the donator icon packer.
+ * @param args the arguments.
+ * @throws IOException the exception.
+ */
+ public static void main(String...args) throws IOException {
+ setStore(new Store("./498/"));
+ colorFile = new IndexedColorImageFile(store, ARCHIVE_ID, 0);
+ //colorFile.replaceImage(ImageIO.read(new File("logo.png")), 0);
+ colorFile.addImage(ImageIO.read(new File("nazi.png")));
+ //colorFile.delete(1);
+ //packAll();
+ dump();
+ save();
+ }
+
+ /**
+ * Packs all the icons.
+ * @throws IOException the exception.
+ */
+ public static void packAll() throws IOException {
+ for (int i = 0; i < ICONS.length; i++) {
+ pack(i, getImage(ICONS[i]));
+ }
+ }
+
+ /**
+ * Packs an image to the cache.
+ * @param index the index.
+ * @param image the image.
+ */
+ public static void pack(int index, BufferedImage image) {
+ if (image == null) {
+ System.out.println("Image null at " + index + "!");
+ return;
+ }
+ String name = ICONS[index];
+ int realIndex = START_INDEX + index;
+ int indexPacked = 0;
+ boolean replace = false;
+ if (realIndex < colorFile.getImages().length) {
+ colorFile.replaceImage(image, realIndex);
+ replace = true;
+ } else {
+ indexPacked = colorFile.addImage(image);
+ }
+ save();
+ System.out.println("Packing icon with name - " + name + ", chat index=" + realIndex + ", indexPacked=" + indexPacked + ", replace=" + replace + "!");
+ }
+
+ /**
+ * Dumps the icons.
+ * @throws IOException the exception.
+ */
+ public static void dump() throws IOException {
+ dumpIcons(DUMP_PATH);
+ }
+
+ /**
+ * Dumps the icons to a path.
+ * @param path the path.
+ * @throws IOException the exception.
+ */
+ public static void dumpIcons(String path) throws IOException {
+ int index = 0;
+ System.out.println("Size=" + colorFile.getImages().length);
+ for (BufferedImage image : colorFile.getImages()) {
+ String name = path + "/icon-" + index++ + ".png";
+ ImageIO.write(image, "PNG", new File(name));
+ System.out.println("Dumping icon - " + name);
+ }
+ }
+
+ /**
+ * Saves the index.
+ */
+ public static void save() {
+ store.getIndexes()[8].putFile(ARCHIVE_ID, 0, colorFile.encodeFile());
+ }
+
+ /**
+ * Gets a buffered image.
+ * @param name the name.
+ * @return the image.
+ * @throws IOException the exception.
+ */
+ public static BufferedImage getImage(String name) throws IOException {
+ return ImageIO.read(new File(PATH + "/" + name + ".png"));
+ }
+
+ /**
+ * Gets the store.
+ * @return the store
+ */
+ public static Store getStore() {
+ return store;
+ }
+
+ /**
+ * Sets the store.
+ * @param store the store to set
+ */
+ public static void setStore(Store store) {
+ DonatorIconPacker.store = store;
+ }
+}
diff --git a/Tools/Cache Editor/src/emperor/ItemPacker.java b/Tools/Cache Editor/src/emperor/ItemPacker.java
new file mode 100644
index 000000000..bebe23ac7
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/ItemPacker.java
@@ -0,0 +1,139 @@
+package emperor;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import com.alex.loaders.items.ItemDefinitions;
+import com.alex.store.Index;
+import com.alex.store.Store;
+
+/**
+ * Packs items.
+ * @author Vexia
+ *
+ */
+public class ItemPacker {
+
+ /**
+ * The store to pack to.
+ */
+ private static Store store;
+
+ /**
+ * The main method.
+ * @param args the arguments.
+ * @throws IOException the exception.
+ */
+ public static void main(String...args) throws IOException {
+ store = new Store("./498/");
+ String modelName = "models/44590.dat";
+ packItem(modelName, "Dragon claws");
+ }
+
+ /**
+ * Gets the size.
+ * @return the size.
+ */
+ public static int getSize() {
+ Index index = store.getIndexes()[19];
+ int lastId = index.getLastArchiveId();
+ int fileSize = index.getFile(lastId).length;
+ System.err.println(fileSize);
+ System.err.println(index.getValidFilesCount(lastId));
+ int size = lastId * 256 + fileSize;
+ return size;//13247, 51, 191
+
+ }
+
+ /**
+ * Packs an item.
+ * @param modelName the model name.
+ * @param itemName the name.
+ * @throws IOException the exception.
+ */
+ public static void packItem(String modelName, String itemName) throws IOException {
+ ItemDefinitions def = buildItem(modelName, itemName);
+ System.out.println("Attempting to pack the model - " + modelName + ", for item name - " + itemName);
+ packCustomItem(def);
+ System.out.println("Item packed.");
+ }
+
+ /**
+ * Packs a custom model.
+ * @param data the data.
+ * @return the archive id the model data was packed to, or -1 on failure.
+ */
+ public static int packCustomModel(byte[] data) {
+ int archiveId = store.getIndexes()[19].getLastArchiveId()+1;
+ if(store.getIndexes()[19].putFile(archiveId, 0, data)) {
+ return archiveId;
+ }
+ return -1;
+ }
+
+ /**
+ * Builds an item.
+ * @param modelName the name.
+ * @param itemName the item name.
+ * @return the def.
+ * @throws IOException
+ */
+ public static ItemDefinitions buildItem(String modelName, String itemName) throws IOException {
+ int modelId = packCustomModel(getBytesFromFile(new File(modelName)));
+ ItemDefinitions definition = ItemDefinitions.getItemDefinition(store, 3101);
+ definition.setName(itemName);
+ definition.femaleEquipModelId1 = modelId;
+ definition.maleEquipModelId1 = modelId;
+ definition.invModelId = modelId;
+ return definition;
+ }
+
+ /**
+ * Packs the custom item.
+ * @param def the def.
+ */
+ public static void packCustomItem(ItemDefinitions def) {
+ int id = 13248;
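+ // Item archives hold 256 files each, so archive = id >>> 8 and file = id & 0xFF.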
+ store.getIndexes()[19].putFile(id >>> 8, 0xff & id, def.encode());
+ }
+
+ /**
+ * Gets all the bytes from the file.
+ * @param file the file.
+ * @return the bytes.
+ * @throws IOException the exception.
+ */
+ @SuppressWarnings("resource")
+ public static byte[] getBytesFromFile(File file) throws IOException {
+ InputStream is = new FileInputStream(file);
+ // Get the size of the file
+ long length = file.length();
+ // You cannot create an array using a long type.
+ // It needs to be an int type.
+ // Before converting to an int type, check
+ // to ensure that file is not larger than Integer.MAX_VALUE.
+ if (length > Integer.MAX_VALUE) {
+ // File is too large
+ }
+ // Create the byte array to hold the data
+ byte[] bytes = new byte[(int)length];
+ // Read in the bytes
+ int offset = 0;
+ int numRead = 0;
+ while (offset < bytes.length
+ && (numRead=is.read(bytes, offset, bytes.length-offset)) >= 0) {
+ offset += numRead;
+ }
+ // Ensure all the bytes have been read in
+ if (offset < bytes.length) {
+ throw new IOException("Could not completely read file "+file.getName());
+ }
+ // Close the input stream and return bytes
+ is.close();
+ return bytes;
+ }
+}
diff --git a/Tools/Cache Editor/src/emperor/LandMap.java b/Tools/Cache Editor/src/emperor/LandMap.java
new file mode 100644
index 000000000..9bfaa3c0c
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/LandMap.java
@@ -0,0 +1,83 @@
+package emperor;
+
+import java.nio.ByteBuffer;
+
+public class LandMap {
+
+ ByteBuffer buffer;
+ Byte[][][] overlayOpcodes = new Byte[4][64][64];
+ Byte[][][] overlays = new Byte[4][64][64];
+ Byte[][][] underlays = new Byte[4][64][64];
+ Byte[][][] defaultOpcodes = new Byte[4][64][64];
+ Byte[][][] height = new Byte[4][64][64];
+
+ public void addOverlay(int z, int x, int y, int overlay) {
+ overlays[z][x][y] = (byte) overlay;
+ }
+ public void addUnderlay(int z, int x, int y, int underlay) {
+ underlays[z][x][y] = (byte) underlay;
+ }
+
+ public byte[] generate() {
+ ByteBuffer buffer = ByteBuffer.allocate(1 << 20);
+ for (int z = 0; z < 4; z++) {
+ for (int x = 0; x < 64; x++) {
+ for (int y = 0; y < 64; y++) {
+ Byte b = null;
+ if ((b = defaultOpcodes[z][x][y]) != null) {
+ buffer.put(b);
+ }
+ if ((b = underlays[z][x][y]) != null) {
+ buffer.put(b);
+ }
+ if ((b = overlayOpcodes[z][x][y]) != null) {
+ buffer.put(b);
+ buffer.put(overlays[z][x][y]);
+ }
+ if ((b = height[z][x][y]) != null) {
+ buffer.put((byte) 1);
+ buffer.put(b);
+ } else {
+ buffer.put((byte) 0);
+ }
+ }
+ }
+ }
+ if (this.buffer != null) { // trailing data captured by map(); absent when building a region from scratch
+ while (this.buffer.hasRemaining()) {
+ buffer.put(this.buffer.get());
+ }
+ }
+ buffer.flip();
+ byte[] bs = new byte[buffer.remaining()];
+ buffer.get(bs);
+ return bs;
+ }
+
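+ /**
+ * Parses the per-tile mapscape stream: for each of the 4x64x64 tiles, opcodes
+ * are read until a terminator - 0 ends the tile, 1 is followed by a height
+ * byte, 2-49 introduce an overlay byte, 50-81 are kept as underlay opcodes and
+ * anything higher as a default opcode. The raw opcode values are cached per
+ * tile so generate() can re-serialize the region.
+ */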
+ public void map(ByteBuffer buffer) {
+ this.buffer = buffer;
+ for (int z = 0; z < 4; z++) {
+ for (int x = 0; x < 64; x++) {
+ for (int y = 0; y < 64; y++) {
+ while (true) {
+ int opcode = buffer.get() & 0xFF;
+ if (opcode == 0) {
+ break;
+ }
+ if (opcode == 1) {
+ height[z][x][y] = buffer.get();
+ break;
+ }
+ if (opcode <= 49) {
+ overlayOpcodes[z][x][y] = (byte) opcode;
+ overlays[z][x][y] = buffer.get();
+ } else if (opcode <= 81) {
+ underlays[z][x][y] = (byte) opcode;
+ } else {
+ defaultOpcodes[z][x][y] = (byte) opcode;
+ }
+ }
+ }
+ }
+ }
+ System.out.println("Read landscape (remaining=" + buffer.remaining() + ").");
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/Landscape.java b/Tools/Cache Editor/src/emperor/Landscape.java
new file mode 100644
index 000000000..68e86bece
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/Landscape.java
@@ -0,0 +1,15 @@
+package emperor;
+
+public class Landscape {
+
+ byte[][][] flags = new byte[4][64][64];
+ byte[][][] overlays = new byte[4][64][64];
+ byte[][][] underlays = new byte[4][64][64];
+
+ public void addOverlay(int z, int x, int y, int overlay) {
+ overlays[z][x][y] = (byte) overlay;
+ }
+ public void addUnderlay(int z, int x, int y, int underlay) {
+ underlays[z][x][y] = (byte) underlay;
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/LandscapeCache.java b/Tools/Cache Editor/src/emperor/LandscapeCache.java
new file mode 100644
index 000000000..1d8ebafd8
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/LandscapeCache.java
@@ -0,0 +1,243 @@
+package emperor;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileChannel.MapMode;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.alex.store.Store;
+import com.alex.util.gzip.GZipCompressor;
+import com.alex.util.gzip.GZipDecompressor;
+
+/**
+ * Holds the map cache.
+ *
+ * @author Emperor
+ *
+ */
+public final class LandscapeCache {
+
+ /**
+ * The map indices buffer.
+ */
+ private static ByteBuffer mapIndices;
+
+ /**
+ * The landscapes.
+ */
+ private static final Map<Integer, byte[]> landscapes = new HashMap<>();
+
+ /**
+ * The amount of indexes.
+ */
+ private static int indexes;
+
+ /**
+ * The cache length.
+ */
+ private static int cacheLength;
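+ // File layout (as read by init() and forId() below): idx_reference.dat holds
+ // a short entry count followed by one int offset per entry, while
+ // map_cache_file.idx0 holds, per entry, a short decompressed length followed
+ // by the gzipped map data.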
+
+ /**
+ * The indexes list.
+ */
+ private static int[] indices = null;
+
+ /**
+ * The path.
+ */
+ private static String path;
+
+ /**
+ * The file store.
+ */
+ private static Store store;
+
+ /**
+ * Initializes the landscape cache.
+ *
+ * @param path
+ * The cache path.
+ * @param store
+ * The file store.
+ * @throws Throwable
+ * When an exception occurs.
+ */
+ public static void init(String path, Store store) throws Throwable {
+ LandscapeCache.path = path;
+ LandscapeCache.store = store;
+ try {
+ RandomAccessFile raf = new RandomAccessFile(path + "/idx_reference.dat", "r");
+ FileChannel channel = raf.getChannel();
+ mapIndices = channel.map(MapMode.READ_ONLY, 0, channel.size());
+ raf.close();
+ channel.close();
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ cacheLength = (int) new File(path + "/map_cache_file.idx0").length();
+ ByteBuffer buffer = mapIndices.duplicate();
+ indexes = buffer.getShort() & 0xFFFF;
+ indices = new int[indexes];
+ for (int i = 0; i < indexes; i++) {
+ indices[i] = buffer.getInt();
+ }
+ int count = 0;
+ for (int i = 0; i < indexes; i++) {
+ byte[] b = forId(i);
+ if (b != null && b.length > 0) {
+ landscapes.put(i, b);
+ count++;
+ }
+ }
+ System.out.println("Succesfully loaded " + count + "/" + indexes + " regions!");
+ }
+
+ /**
+ * Gets the landscape byte buffer.
+ *
+ * @param regionId
+ * The region id.
+ * @return The landscape buffer.
+ */
+ public static byte[] getLandscape(int regionId) {
+ int index = LandscapeCache.indexFor(regionId);
+ return forId(index);
+ }
+
+ /**
+ * Gets the maps for the given id.
+ *
+ * @param id
+ * The id.
+ * @return The map data.
+ */
+ public static byte[] forId(int id) {
+ if (id < 0) {
+ return new byte[0];
+ }
+ try {
+ RandomAccessFile raf = new RandomAccessFile(path + "/map_cache_file.idx0", "r");
+ FileChannel channel = raf.getChannel();
+ int size = (int) ((id >= indexes - 1 ? channel.size() : indices[id + 1]) - indices[id]);
+ if (size < 3) {
+ raf.close();
+ channel.close();
+ // System.out.println("Index " + id + " has invalid size!");
+ return new byte[0];
+ }
+ //System.out.println("Size: " + size + "/" + channel.size() + ", index: " + indices[id]);
+ MappedByteBuffer buffer = channel.map(MapMode.READ_ONLY, indices[id], size);
+ raf.close();
+ channel.close();
+ int length = size - 2;
+ if (length < 1) {
+ return new byte[0];
+ }
+ int decompressedLength = buffer.getShort() & 0xFFFF;
+ byte[] b = new byte[length];
+ buffer.get(b);
+ byte[] data = new byte[decompressedLength];
+ try {
+ GZipDecompressor.decompress(data, b, 0, b.length);
+ } catch (Throwable t) {
+ System.err.println("Failed to decompress idx " + id + "!");
+ return new byte[0];
+ }
+ return data;
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return new byte[0];
+ }
+
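+ /**
+ * Rewrites map_cache_file.idx0 and idx_reference.dat under the given path from
+ * the in-memory landscapes map, gzipping each entry and recording its offset
+ * in the reference table.
+ * @param path The directory to write to.
+ * @throws Throwable When an exception occurs.
+ */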
+ public static void dump(String path) throws Throwable {
+ indices = new int[indexes];
+ int offset = 0;
+ ByteBuffer mapCache = ByteBuffer.allocate(10_000_000);
+ for (int i = 0; i < indexes; i++) {
+ indices[i] = offset;
+ byte[] bs = landscapes.get(i);
+ if (bs != null && bs.length > 1) {
+ mapCache.putShort((short) bs.length);
+ byte[] b = GZipCompressor.compress(bs);
+ mapCache.put(b);
+ offset += 2 + b.length;
+ }
+ }
+ mapCache.flip();
+ File f = new File(path + "/map_cache_file.idx0");
+ if (f.exists()) {
+ if (!f.delete()) {
+ System.err.println("Could not delete #1!");
+ }
+ }
+ RandomAccessFile raf = new RandomAccessFile(f, "rw");
+ FileChannel channel = raf.getChannel();
+ channel.write(mapCache);
+ raf.close();
+ channel.close();
+ ByteBuffer buffer = ByteBuffer.allocate(100_000);
+ buffer.putShort((short) indexes);
+ for (int i = 0; i < indexes; i++) {
+ buffer.putInt(indices[i]);
+ }
+ buffer.flip();
+ f = new File(path + "/idx_reference.dat");
+ if (f.exists()) {
+ if (!f.delete()) {
+ System.err.println("Could not delete #2!");
+ f = new File(path + "/conflict-idx_reference.dat");
+ }
+ }
+ raf = new RandomAccessFile(f, "rw");
+ channel = raf.getChannel();
+ channel.write(buffer);
+ raf.close();
+ channel.close();
+ }
+
+ /**
+ * Gets the index for the region id.
+ *
+ * @param regionId
+ * The region id.
+ * @return The index.
+ */
+ public static int indexFor(int regionId) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ return store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ }
+
+ /**
+ * Gets the reference table buffer.
+ *
+ * @return The reference table buffer.
+ */
+ public static ByteBuffer getReferenceTable() {
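+ // Assumed framing for the custom update server: opcode 251, the index table
+ // length (int), the total cache file length (int), then the raw index table.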
+ ByteBuffer buffer = ByteBuffer.allocate(mapIndices.remaining() + 10);
+ return buffer.put((byte) 251).putInt(LandscapeCache.getMapIndices().remaining()).putInt(cacheLength).put(LandscapeCache.getMapIndices().duplicate());
+ }
+
+ /**
+ * Gets the mapIndices.
+ *
+ * @return The mapIndices.
+ */
+ public static ByteBuffer getMapIndices() {
+ return mapIndices;
+ }
+
+ /**
+ * Gets the landscapes mapping.
+ * @return The mapping.
+ */
+ public static Map<Integer, byte[]> getLandscapes() {
+ return landscapes;
+ }
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/LandscapeEditor.java b/Tools/Cache Editor/src/emperor/LandscapeEditor.java
new file mode 100644
index 000000000..7491f61ea
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/LandscapeEditor.java
@@ -0,0 +1,465 @@
+package emperor;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileChannel.MapMode;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apollo.fs.IndexedFileSystem;
+import org.apollo.fs.util.ZipUtils;
+
+import alex.cache.loaders.OverlayDefinition;
+
+import com.alex.io.InputStream;
+import com.alex.store.Index;
+import com.alex.store.Store;
+import com.alex.tools.clientCacheUpdater.RSXteas;
+import com.alex.utils.Constants;
+import com.alex.utils.Utils;
+
+import emperor.ObjectMap.GameObject;
+
+/**
+ * @author Emperor
+ */
+public class LandscapeEditor {
+
+ public static final boolean COPY_OUT = true;
+
+ public static final void main(String...args) throws Throwable {
+ if (COPY_OUT) {
+ for (File f : new File("./mapcache_out/").listFiles()) {
+ copyFile(f, new File("./mapcache/" + f.getName()));
+ }
+ }
+// Store store = new Store("./498/");
+// packMaps(store);
+// checkNonOceanic(store, new int[] {8240, 8241, 8242, 8243, 8249, 8250, 8251, 8254, 8255, 8256, 8505, 8506, 8507, 8510, 8511, 8512, 8761, 8762, 8764, 8765, 8766, 8767, 8768, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9262, 9274, 9277, 9278, 9279, 9280, 9363, 9518, 9529, 9530, 9533, 9534, 9535, 9536, 9539, 9618, 9784, 9785, 9786, 9787, 9788, 9789, 9790, 9791, 9792, 10023, 10024, 10025, 10026, 10027, 10041, 10045, 10046, 10047, 10048, 10279, 10280, 10281, 10282, 10283, 10298, 10299, 10302, 10303, 10304, 10535, 10538, 10539, 10541, 10543, 10555, 10556, 10557, 10560, 10568, 10570, 10791, 10792, 10796, 10797, 10798, 10799, 10800, 10813, 10815, 10816, 10824, 10825, 10826, 11047, 11048, 11049, 11052, 11069, 11070, 11071, 11072, 11080, 11082, 11303, 11305, 11307, 11308, 11326, 11327, 11328, 11559, 11560, 11561, 11563, 11564, 11582, 11583, 11584, 11815, 11816, 11817, 11818, 11819, 11820, 11838, 11839, 11840, 12071, 12072, 12073, 12074, 12075, 12076, 12077, 12094, 12095, 12096, 12333, 12334, 12350, 12351, 12352, 12606, 12607, 12608, 12862, 12863, 12864, 13118, 13119, 13120, 13374, 13375, 13376, 13466, 13610, 13628, 13629, 13630, 13631, 13632, 13866, 13867, 13868, 14128, 14136, 14379, 14380, 14381, 14382, 14383, 14384, 14392, 14635, 14636, 14640, 14891, 14892, 14896, 14903, 14904, 15147, 15152, 15158, 15160, 15403, 15404, 15405, 15407, 15408, 15414, 15415, 15416});//new int[] {6731, 6985, 8022, 8240, 8241, 8242, 8243, 8249, 8250, 8251, 8254, 8255, 8256, 8280, 8505, 8506, 8507, 8510, 8511, 8512, 8513, 8515, 8761, 8762, 8764, 8765, 8766, 8767, 8768, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9262, 9274, 9277, 9278, 9279, 9280, 9363, 9518, 9529, 9530, 9533, 9534, 9535, 9536, 9539, 9618, 9784, 9785, 9786, 9787, 9788, 9789, 9790, 9791, 9792, 10023, 10024, 10025, 10026, 10027, 10041, 10045, 10046, 10047, 10048, 10129, 10279, 10280, 10281, 10282, 10283, 10298, 10299, 10302, 10303, 10304, 10308, 10535, 10538, 10539, 10541, 10543, 10555, 10556, 10557, 10560, 10568, 10570, 10583, 10791, 10792, 10796, 10797, 10798, 10799, 10800, 10813, 10815, 10816, 10824, 10825, 10826, 11047, 11048, 11049, 11052, 11069, 11070, 11071, 11072, 11080, 11082, 11303, 11304, 11305, 11307, 11308, 11326, 11327, 11328, 11559, 11560, 11561, 11563, 11564, 11582, 11583, 11584, 11815, 11816, 11817, 11818, 11819, 11820, 11838, 11839, 11840, 12071, 12072, 12073, 12074, 12075, 12076, 12077, 12094, 12095, 12096, 12333, 12334, 12350, 12351, 12352, 12606, 12607, 12608, 12627, 12862, 12863, 12864, 12889, 12890, 13118, 13119, 13120, 13144, 13145, 13146, 13354, 13374, 13375, 13376, 13400, 13401, 13402, 13466, 13610, 13625, 13626, 13628, 13629, 13630, 13631, 13632, 13866, 13867, 13868, 14128, 14136, 14379, 14380, 14381, 14382, 14383, 14384, 14392, 14635, 14636, 14640, 14648, 14891, 14892, 14896, 14903, 14904, 15147, 15152, 15158, 15160, 15403, 15404, 15405, 15407, 15408, 15414, 15415, 15416});
+// generateCache(store);
+// override(store, 788, 12187);
+// packOSRSMaps(store);
+// packLandscape(store);
+// pack377Maps(store);
+// addMissingMaps(store);
+// createMap(store);
+// changeMap(store);
+ }
+
+ static void packMaps(Store store) throws Throwable {
+ LandscapeCache.init("./mapcache/", store);
+ int[] keys = new int[] { 14881828, -6662814, 58238456, 146761213 };
+ int count = 0;
+ int failed = 0;
+ for (int regionId = 0; regionId < 50_000; regionId++) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ String name = "l" + regionX + "_" + regionY;
+ int index = store.getIndexes()[5].getArchiveId(name);
+ if (index < 0) {
+ continue;
+ }
+ byte[] b = LandscapeCache.forId(index);
+ if (b == null || b.length < 2 || !validRegion(new InputStream(b))) {
+ failed++;
+ continue;
+ }
+ if (store.getIndexes()[5].putFile(index, 0, Constants.GZIP_COMPRESSION, b, keys, true, true, Utils.getNameHash(name), -1)) {
+ count++;
+ } else {
+ failed++;
+ }
+ }
+// store.getIndexes()[5].rewriteTable();
+// store.getIndexes()[5].resetCachedFiles();
+ System.out.println("Packed " + count + " maps (failed " + failed + " maps)!");
+// store2.getIndexes()[i].putFile(oldArchiveId, 0, Constants.GZIP_COMPRESSION, data, keys2, false, false, Utils.getNameHash(nameHash), -1);
+ }
+
+ static void createMap(Store store) throws Throwable {
+ ObjectMap map = new ObjectMap();
+ for (int x = 0; x < 64; x++) {
+ for (int y = 0; y < 64; y++) {
+ if (x == 32 || y == 32) {
+ continue;
+ }
+ map.add(1276, x, y, 0, 10, 0);
+ }
+ }
+ byte[] bs = map.generate();
+ int regionId = 11110;
+ int x = regionId >> 8 & 0xFF;
+ int y = regionId & 0xFF;
+ LandscapeCache.init("./mapcache/", store);
+ int archive = store.getIndexes()[5].getArchiveId("l" + x + "_" + y);
+ if (archive > -1) {
+ System.out.println("Already contained region " + regionId + " (archive=" + archive + ", len=" + bs.length + " - " +LandscapeCache.forId(archive).length + ")!");
+ return;
+ }
+ for (int ar = 0; ar < 50000; ar++) {
+ if (!store.getIndexes()[5].archiveExists(ar)) {
+ if (LandscapeCache.forId(ar).length < 1) {
+ archive = ar;
+ System.out.println("Archive available: " + ar);
+ break;
+ }
+ }
+ }
+ store.getIndexes()[5].putFile(archive, 0, Constants.GZIP_COMPRESSION, bs, null, true, true,
+ Utils.getNameHash("l" + x + "_" + y), -1);
+ LandscapeCache.getLandscapes().put(archive, bs);
+ LandscapeCache.dump("./mapcache_out/");
+ System.out.println("Done!");
+ }
+
+ static void changeMap(Store store) throws Throwable {
+ int regionId = 12439;
+ GameObject[] remove = new GameObject[] {
+ new GameObject(32099, 42, 27, 0, 10, 3)
+ };
+ GameObject[] replace = new GameObject[] {
+ new GameObject(29139, 42, 27, 0, 10, 3)
+ };
+
+ LandscapeCache.init("./mapcache/", store);
+ ObjectMap map = new ObjectMap();
+ map.map(new InputStream(LandscapeCache.getLandscape(regionId)));
+ for (int i = 0; i < remove.length; i++) {
+ GameObject r = remove[i];
+ GameObject object = map.get(r.id, r.loc.x, r.loc.y, r.loc.z, r.type, r.rotation);
+ if (object == null) {
+ System.err.println("Could not find object!");
+ return;
+ }
+ map.getObjects().remove(object);
+ if (replace[i] != null) {
+ map.getObjects().add(replace[i]);
+ }
+ }
+ byte[] bs = map.generate();
+ LandscapeCache.getLandscapes().put(LandscapeCache.indexFor(regionId), bs);
+ LandscapeCache.dump("./mapcache_out/");
+ }
+
+ /**
+ * Overrides the regions.
+ * @param store The file store.
+ * @param revision The revision to get the regions from.
+ * @param regionIds The region ids to override.
+ * @throws Throwable When an exception occurs.
+ */
+ public static void override(Store store, int revision, int...regionIds) throws Throwable {
+ LandscapeCache.init("./mapcache/", store);
+ int count = 0;
+ if (revision == 377) {
+ Store s = new Store("./468/");
+ IndexedFileSystem fs = new IndexedFileSystem(new File("./377/"), true);
+ for (int regionId : regionIds) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int index = store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ byte[] bs = null;
+ try {
+ ByteBuffer buffer = fs.getFile(4, index);
+ bs = ZipUtils.unzip(buffer).array();
+ } catch (Throwable t) {
+ continue;
+ }
+ if (bs != null && validRegion(new InputStream(bs))) {
+ System.out.println("Added region " + regionId + "!");
+ count++;
+ LandscapeCache.getLandscapes().put(index, bs);
+ store.getIndexes()[5].putArchive(s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString()), s);
+ }
+ }
+ fs.close();
+ } else {
+ RSXteas.loadUnpackedXteas(revision);
+ Store s = new Store("./" + revision + "/");
+ boolean newFormat = revision > 750;
+ for (int regionId : regionIds) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int index = store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ int[] xteas = RSXteas.getXteas(regionId);
+ byte[] b = newFormat ? s.getIndexes()[5].getFile(regionX | regionY << 7, 0)
+ : s.getIndexes()[5].getFile(index, 0, xteas);
+ if (b != null && b.length > 1 && validRegion(new InputStream(b))) {
+ System.out.println("Added region " + regionId + "!");
+ LandscapeCache.getLandscapes().put(index, b);
+ count++;
+ if (!newFormat) {
+ store.getIndexes()[5].putArchive(s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString()), s);
+ }
+ }
+ }
+ }
+ LandscapeCache.dump("./mapcache_out/");
+ System.out.println("Packed " + count + "/" + regionIds.length + " regions.");
+ }
+
+ /**
+ * Fully generates a map cache (from scratch).
+ * @param store The file store.
+ * @throws Throwable
+ */
+ public static void generateCache(Store store) throws Throwable {
+ LandscapeCache.init("./mapcache/", store);
+ Store s = new Store("./508/");
+ List<Integer> missingRegions = new ArrayList<>();
+ System.out.println("Packing 508 maps...");
+ RSXteas.loadUnpackedXteas(508);
+ int count = 0;
+ for (int regionId = 0; regionId < 50_000; regionId++) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int index = store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ if (index < 0) {
+ continue;
+ }
+ int[] xteas = RSXteas.getXteas(regionId);
+ byte[] b = s.getIndexes()[5].getFile(index, 0, xteas);
+ if (b == null || b.length < 2 || !validRegion(new InputStream(b))) {
+ RandomAccessFile raf = new RandomAccessFile(new File("./508_Maps/" + index), "r");
+ ByteBuffer buffer = raf.getChannel().map(MapMode.READ_ONLY, 0, raf.length());
+ b = new byte[(int) raf.length()];
+ buffer.get(b);
+ raf.close();
+ if (!validRegion(new InputStream(b))) {
+ missingRegions.add(regionId);
+ continue;
+ }
+ System.out.println("Used 508 map data file for index " + index + "!");
+ }
+ int archiveId = s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString());
+ if (archiveId > -1) {
+ store.getIndexes()[5].putArchive(archiveId, s);
+ }
+ LandscapeCache.getLandscapes().put(index, b);
+ count++;
+ }
+ System.out.println("Added " + count + " 508 regions!");
+ System.out.println("Packing 468 maps...");
+ RSXteas.loadUnpackedXteas(468);
+ s = new Store("./468/");
+ int subCount = 0;
+ for (int regionId = 0; regionId < 50_000; regionId++) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int index = s.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ if (!missingRegions.contains(regionId)) {
+ continue;
+ }
+ int[] xteas = RSXteas.getXteas(regionId);
+ byte[] b = s.getIndexes()[5].getFile(index, 0, xteas);
+ if (b != null && b.length > 1 && validRegion(new InputStream(b))) {
+ System.out.println("Added missing region " + regionId + "!");
+ count++;
+ subCount++;
+ missingRegions.remove((Object) regionId);
+ LandscapeCache.getLandscapes().put(index, b);
+ store.getIndexes()[5].putArchive(s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString()), s);
+ }
+ }
+ System.out.println("Added " + subCount + " 468 regions!");
+ System.out.println("Packing 377 maps...");
+ subCount = 0;
+ IndexedFileSystem fs = new IndexedFileSystem(new File("./377/"), true);
+ for (int regionId = 0; regionId < 50_000; regionId++) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int index = store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ if (!missingRegions.contains(regionId)) {
+ continue;
+ }
+ byte[] bs = null;
+ try {
+ ByteBuffer buffer = fs.getFile(4, index);
+ bs = ZipUtils.unzip(buffer).array();
+ } catch (Throwable t) {
+ continue;
+ }
+ if (bs != null && validRegion(new InputStream(bs))) {
+ System.out.println("Added missing region " + regionId + "!");
+ count++;
+ subCount++;
+ missingRegions.remove((Object) regionId);
+ LandscapeCache.getLandscapes().put(index, bs);
+ store.getIndexes()[5].putArchive(s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString()), s);
+ }
+ }
+ System.out.println("Added " + subCount + " 377 regions!");
+ fs.close();
+ System.out.println("Packing 666 maps...");
+ RSXteas.loadUnpackedXteas(666);
+ s = new Store("./666/");
+ subCount = 0;
+ for (int regionId = 0; regionId < 50_000; regionId++) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int index = store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ if (index < 0) {
+ continue;
+ }
+ if (!missingRegions.contains(regionId)) {
+ continue;
+ }
+ int[] xteas = RSXteas.getXteas(regionId);
+ byte[] b = s.getIndexes()[5].getFile(index, 0, xteas);
+ if (b != null && b.length > 1 && validRegion(new InputStream(b))) {
+ System.out.println("Added missing region " + regionId + "!");
+ count++;
+ subCount++;
+ missingRegions.remove((Object) regionId);
+ LandscapeCache.getLandscapes().put(index, b);
+ store.getIndexes()[5].putArchive(s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString()), s);
+ }
+ }
+ System.out.println("Added " + subCount + " 666 regions!");
+ System.out.println("Packing 788 maps...");
+ s = new Store("./788/");
+ subCount = 0;
+ for (int regionId = 0; regionId < 50_000; regionId++) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ if (!missingRegions.contains(regionId) && regionId != 6234) {
+ continue;
+ }
+ int index = regionX | regionY << 7;
+ byte[] b = s.getIndexes()[5].getFile(index, 0);
+ if (b != null && b.length > 1 && validRegion(new InputStream(b))) {
+ index = store.getIndexes()[5].getArchiveId(new StringBuilder("l").append(regionX).append("_").append(regionY).toString());
+ System.out.println("Added missing region " + regionId + "!");
+ count++;
+ subCount++;
+ missingRegions.remove((Object) regionId);
+ LandscapeCache.getLandscapes().put(index, b);
+ }
+ }
+ System.out.println("Added " + subCount + " 788 regions!");
+ LandscapeCache.dump("./mapcache_out/");
+ System.out.println("Added a total of " + count + " map regions, missing " + missingRegions.size() + " regions.");
+ System.out.println("Missing: " + Arrays.toString(missingRegions.toArray()));
+ System.exit(0);
+ }
+
+ /**
+ * Checks for non-oceanic regions (regions that do not consist purely of sea).
+ * @param store The store.
+ * @param regions The regions array.
+ */
+ public static void checkNonOceanic(Store store, int[] regions) {
+ List<Integer> missing = new ArrayList<>();
+ for (int regionId : regions) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ int mapscapeId = store.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString());
+ if (mapscapeId < 0) {
+ System.err.println("Invalid mapscape index for region " + regionId + "!");
+ continue;
+ }
+ boolean abort = false;
+ ByteBuffer buffer = ByteBuffer.wrap(store.getIndexes()[5].getFile(mapscapeId, 0));
+ byte[][][] mapscape = new byte[4][64][64];
+ main: for (int z = 0; z < 4; z++) {
+ for (int x = 0; x < 64; x++) {
+ for (int y = 0; y < 64; y++) {
+ while (true) {
+ int value = buffer.get() & 0xFF;
+ if (value == 0) {
+ break;
+ }
+ if (value == 1) {
+ buffer.get();
+ break;
+ }
+ if (value <= 49) {
+ int overlay = buffer.get() & 0xFF;
+ OverlayDefinition def = OverlayDefinition.forId(store, overlay);
+ if (def != null && def.getTextureId() != 25) {
+ abort = true;
+ break main;
+ }
+ } else if (value <= 81) {
+ mapscape[z][x][y] = (byte) (value - 49);
+ }
+ }
+ }
+ }
+ }
+ if (abort) {
+ missing.add(regionId);
+ }
+ }
+ System.out.println("Missing region count: " + missing.size() + "..");
+ System.out.println(Arrays.toString(missing.toArray()));
+ }
+
+ public static void packLandscape(Store store) throws Throwable {
+ Store s = new Store("./508/");
+ int[] ids = new int[] {13722};
+ for (int regionId : ids) {
+ int regionX = regionId >> 8 & 0xFF;
+ int regionY = regionId & 0xFF;
+ boolean b = store.getIndexes()[5].putArchive(s.getIndexes()[5].getArchiveId(new StringBuilder("m").append(regionX).append("_").append(regionY).toString()), s);
+ System.out.println("Packed landscape (" + regionId + "): " + b);
+ }
+ }
+
+ public static boolean addMapFile(Index index, String name, byte[] data) {
+ int archiveId = index.getArchiveId(name);
+ if(archiveId == -1)
+ archiveId = index.getTable().getValidArchiveIds().length;
+ return index.putFile(archiveId, 0, Constants.GZIP_COMPRESSION, data, null, false, false, Utils.getNameHash(name), -1);
+ }
+
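+ /**
+ * Heuristic sanity check on landscape (object placement) data: walks the
+ * incremental id/location encoding and accepts the stream once more than ten
+ * in-bounds (64x64) placements have been decoded.
+ * @param stream The landscape stream.
+ * @return {@code True} if the data parses as a plausible region.
+ */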
+ public static boolean validRegion(InputStream stream) {
+ int count = 0;
+ for (;;) {
+ int offset = stream.readSmart2();
+ if (offset == 0) {
+ break;
+ }
+ int location = 0;
+ for (;;) {
+ offset = stream.readUnsignedSmart();
+ if (offset == 0) {
+ break;
+ }
+ location += offset - 1;
+ int y = location & 0x3f;
+ int x = location >> 6 & 0x3f;
+ stream.readUnsignedByte();
+ if (x >= 0 && y >= 0 && x < 64 && y < 64) {
+ if (++count > 10) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Copies a file.
+ * @param in The file to be copied.
+ * @param out The file to copy to.
+ */
+ private static void copyFile(File in, File out) {
+ try (FileChannel channel = new FileInputStream(in).getChannel();
+ FileChannel output = new FileOutputStream(out).getChannel()) {
+ channel.transferTo(0, channel.size(), output);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/MapEditor.java b/Tools/Cache Editor/src/emperor/MapEditor.java
new file mode 100644
index 000000000..45669634d
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/MapEditor.java
@@ -0,0 +1,393 @@
+package emperor;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import alex.cache.loaders.OverlayDefinition;
+
+import com.alex.io.InputStream;
+import com.alex.store.Store;
+import com.alex.utils.Constants;
+import com.alex.utils.Utils;
+
+import emperor.ObjectMap.GameObject;
+
+/**
+ * Used for editing maps.
+ * @author Emperor
+ *
+ */
+public final class MapEditor {
+
+ /**
+ * The valid revisions.
+ */
+ private static final int[] VALID_REVISIONS = {
+ 377, 468, 474, 498, 503, 508, 538, 546, 562, 569, 666, 788
+ };
+
+ /**
+ * The xtea keys used to encrypt the maps.
+ */
+ private static final int[] XTEA_KEYS = {
+ 14881828, -6662814, 58238456, 146761213
+ };
+
+ /**
+ * The mapscape type (floor).
+ */
+ private static final String MAP_TYPE = "m";
+
+ /**
+ * The landscape type (objects).
+ */
+ private static final String LAND_TYPE = "l";
+
+ /**
+ * The map cache index.
+ */
+ private static final int MAP_INDEX = 5;
+
+ /**
+ * The cache file store to change.
+ */
+ private static Store store;
+
+ /**
+ * Used to update the maps.
+ * @throws Throwable When an exception occurs.
+ */
+ private static void update() throws Throwable {
+// replaceObjects(new GameObject[][] {
+// { new GameObject(5281, 3666, 3521, 1, 10, 0), new GameObject(5281, 3666, 3520, 1, 10, 0) }
+// });
+// copy(13099, 13099, new Store("./508/"), new int[] { 273193181, -1465876115, -151667950, 40605898 });
+ replaceMapPart(13099, new Store("./468/"), new int[] {-636687345, -1379232722, -1661855973, 666075756}, 18, 36, 31, 49, 0);
+// int regionId = 13099;
+// System.out.println("Revisions for region " + regionId + ": " + Arrays.toString(getValidRevisions(regionId)) + ".");
+ }
+
+ /**
+ * Replaces a part of the map.
+ * @param regionId The region id.
+ * @param from The store to copy from.
+ * @param xteaKeys The XTEA keys used to decrypt the region from the store to copy from.
+ * @param southWestX The south west x (on region) coordinate of the part to replace.
+ * @param southWestY The south west y (on region) coordinate of the part to replace.
+ * @param northEastX The north east x (on region) coordinate of the part to replace.
+ * @param northEastY The north east y (on region) coordinate of the part to replace.
+ */
+ static void replaceMapPart(int regionId, Store from, int[] xteaKeys, int southWestX, int southWestY, int northEastX, int northEastY, int...planes) {
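+ // Pass 1: splice the object (landscape) data - drop every object inside the
+ // target rectangle, then copy in the matching objects from the source store.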
+ ObjectMap map = new ObjectMap();
+ map.map(new InputStream(getLandscape(regionId, store, XTEA_KEYS)));
+ for (Iterator<GameObject> it = map.getObjects().iterator(); it.hasNext();) {
+ GameObject object = it.next();
+ if (object.loc.x >= southWestX && object.loc.x <= northEastX && object.loc.y >= southWestY && object.loc.y <= northEastY) {
+ it.remove();
+ }
+ }
+ ObjectMap m = new ObjectMap();
+ m.map(new InputStream(getLandscape(regionId, from, xteaKeys)));
+ for (GameObject object : m.getObjects()) {
+ for (int z : planes) {
+ if (object.loc.z == z && object.loc.x >= southWestX && object.loc.x <= northEastX && object.loc.y >= southWestY && object.loc.y <= northEastY) {
+ map.getObjects().add(object);
+ break;
+ }
+ }
+ }
+ packLandscape(regionId, map.generate(), store, XTEA_KEYS);
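+ // Pass 2: splice the terrain (mapscape) data tile by tile for the same area.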
+ LandMap l = new LandMap();
+ l.map(ByteBuffer.wrap(store.getIndexes()[5].getFile(getArchiveIndex(MAP_TYPE, regionId, store))));
+ LandMap lm = new LandMap();
+ lm.map(ByteBuffer.wrap(from.getIndexes()[5].getFile(getArchiveIndex(MAP_TYPE, regionId, from))));
+ for (int z : planes) {
+ for (int x = southWestX; x <= northEastX; x++) {
+ for (int y = southWestY; y <= northEastY; y++) {
+ l.defaultOpcodes[z][x][y] = lm.defaultOpcodes[z][x][y];
+ l.height[z][x][y] = lm.height[z][x][y];
+ l.overlayOpcodes[z][x][y] = lm.overlayOpcodes[z][x][y];
+ l.overlays[z][x][y] = lm.overlays[z][x][y];
+ l.underlays[z][x][y] = lm.underlays[z][x][y];
+ }
+ }
+ }
+ packMapscape(regionId, l.generate(), store);
+ }
+
+ /**
+ * Gets the revisions of the caches having this region.
+ * @param regionId The region id.
+ * @return The cache revisions.
+ */
+ public static int[] getValidRevisions(int regionId) {
+ int[] revisions = new int[VALID_REVISIONS.length];
+ int count = 0;
+ for (int revision : VALID_REVISIONS) {
+ String rev = revision == 498 ? "clean_498" : Integer.toString(revision);
+ try {
+ Store store = new Store("./" + rev + "/");
+ System.out.println("./" + rev + "/");
+ if (getArchiveIndex(LAND_TYPE, regionId, store) > -1) {
+ revisions[count++] = revision;
+ }
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ }
+ return Arrays.copyOf(revisions, count);
+ }
+
+ /**
+ * Copies a region.
+ * @param fromId The region id to copy.
+ * @param toId The region id to paste on.
+ * @param from The store to get the data from.
+ * @param xtea The XTEA keys to decrypt the map.
+ */
+ static void copy(int fromId, int toId, Store from, int[] xtea) {
+ copy(LAND_TYPE, fromId, toId, from, xtea);
+ copy(MAP_TYPE, fromId, toId, from, null);
+ }
+
+ /**
+ * Copies a region archive of the given type.
+ * @param type The archive type ("m"=mapscape, "l"=landscape).
+ * @param fromId The region id to copy from.
+ * @param toId The region id to paste on.
+ * @param from The store to get the data from.
+ * @param xtea The XTEA keys to decrypt the archive (landscape only).
+ */
+ private static void copy(String type, int fromId, int toId, Store from, int[] xtea) {
+ int index = getArchiveIndex(type, fromId, from);
+ if (index < 0) {
+ throw new IllegalArgumentException("Region " + fromId + " does not exist!");
+ }
+ byte[] bs = from.getIndexes()[MAP_INDEX].getFile(index, 0, xtea);
+ if (bs == null || bs.length < 1) {
+ throw new IllegalArgumentException("Region " + fromId + " is invalid!");
+ }
+ index = getArchiveIndex(type, toId, store);
+ if (index < 0) {
+ index = findEmptyArchive(store, 0);
+ System.out.println("Creating new region - id=" + index + "!");
+ }
+ store.getIndexes()[MAP_INDEX].putFile(index, 0, Constants.GZIP_COMPRESSION, bs, LAND_TYPE.equals(type) ? XTEA_KEYS : null, true, true, getNameHash(type, toId), -1);
+ }
+
+ /**
+ * Replaces objects.
+ * @param changes The array of object changes.
+ * @throws Throwable when an exception occurs.
+ */
+ static void replaceObjects(GameObject[][] changes) throws Throwable {
+ Map<Integer, Map<GameObject, GameObject>> objects = new HashMap<>();
+ for (int i = 0; i < changes.length; i++) {
+ GameObject old = changes[i][0];
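+ // Regions span 64x64 tiles: regionId packs (x >> 6) into the high byte and (y >> 6) into the low byte.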
+ int regionId = (old.loc.x >> 6) << 8 | (old.loc.y >> 6);
+ old = old.getLocal();
+ Map<GameObject, GameObject> map = objects.get(regionId);
+ if (map == null) {
+ objects.put(regionId, map = new HashMap<>());
+ }
+ GameObject replace = changes[i][1];
+ if (replace != null) {
+ replace = replace.getLocal();
+ }
+ map.put(old, replace);
+ }
+ int count = 0;
+ for (int regionId : objects.keySet()) {
+ Map<GameObject, GameObject> replacements = objects.get(regionId);
+ ObjectMap map = new ObjectMap();
+ map.map(new InputStream(getLandscape(regionId, store, XTEA_KEYS)));
+ for (GameObject object : replacements.keySet()) {
+ GameObject current = map.get(object);
+ if (current == null) {
+ throw new IllegalArgumentException("Could not find object " + object + "!");
+ }
+ map.getObjects().remove(current);
+ current = replacements.get(object);
+ if (current != null) {
+ map.getObjects().add(current);
+ }
+ count++;
+ }
+ packLandscape(regionId, map.generate(), store, XTEA_KEYS);
+ }
+ System.out.println("Changed " + count + " objects in " + objects.size() + " regions!");
+ }
+
+ /**
+ * Packs the mapscape.
+ * @param regionId The region id to pack on.
+ * @param data The mapscape data to pack.
+ * @param store The store used.
+ */
+ private static void packMapscape(int regionId, byte[] data, Store store) {
+ int index = getArchiveIndex(MAP_TYPE, regionId, store);
+ store.getIndexes()[MAP_INDEX].putFile(index, 0, Constants.GZIP_COMPRESSION, data, null, true, true, getNameHash(MAP_TYPE, regionId), -1);
+ }
+
+ /**
+ * Packs the landscape.
+ * @param regionId The region id to pack on.
+ * @param data The landscape data to pack.
+ * @param store The store used.
+ * @param xtea The XTEA keys.
+ */
+ private static void packLandscape(int regionId, byte[] data, Store store, int[] xtea) {
+ int index = getArchiveIndex(LAND_TYPE, regionId, store);
+ store.getIndexes()[MAP_INDEX].putFile(index, 0, Constants.GZIP_COMPRESSION, data, xtea, true, true, getNameHash("l", regionId), -1);
+ }
+
+ /**
+ * Gets the landscape data.
+ * @param regionId The region id.
+ * @param store The store to get the landscape data from.
+ * @param xtea The XTEA keys used to decrypt the landscape.
+ * @return The landscape data.
+ */
+ private static byte[] getLandscape(int regionId, Store store, int[] xtea) {
+ int index = getArchiveIndex(LAND_TYPE, regionId, store);
+ if (index < 0) {
+ throw new IllegalArgumentException("Region " + regionId + " does not exist!");
+ }
+ byte[] bs = store.getIndexes()[MAP_INDEX].getFile(index, 0, xtea);
+ if (bs == null) {
+ throw new IllegalArgumentException("Region " + regionId + " has no valid landscape!");
+ }
+ return bs;
+ }
+
+ /**
+ * Finds an empty archive id.
+ * @param store The store to check.
+ * @param offset The archive offset to start checking from.
+ * @return The new archive index.
+ */
+ private static int findEmptyArchive(Store store, int offset) {
+ for (int index = offset; index < 50000; index++) {
+ if (!store.getIndexes()[MAP_INDEX].archiveExists(index)) {
+ return index;
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Gets the name hash for the given region id.
+ * @param type The archive type "m"=mapscape, "l"=landscape.
+ * @param regionId The region id.
+ * @return The name hash.
+ */
+ private static int getNameHash(String type, int regionId) {
+ int x = regionId >> 8 & 0xFF;
+ int y = regionId & 0xFF;
+ return Utils.getNameHash(type + x + "_" + y);
+ }
+
+ /**
+ * Gets the archive index.
+ * @param type The archive type "m"=mapscape, "l"=landscape.
+ * @param regionId The region id.
+ * @param store The store.
+ * @return The archive index.
+ */
+ private static int getArchiveIndex(String type, int regionId, Store store) {
+ int x = regionId >> 8 & 0xFF;
+ int y = regionId & 0xFF;
+ return store.getIndexes()[MAP_INDEX].getArchiveId(type + x + "_" + y);
+ }
+
+ /**
+ * The main method.
+ * @param args The arguments cast on runtime.
+ * @throws Throwable When an exception occurs.
+ */
+ public static void main(String...args) throws Throwable {
+ String revision = "498";
+ if (args.length > 0) {
+ revision = args[0];
+ }
+ System.out.println("Updating revision " + revision + "...");
+ long start = System.currentTimeMillis();
+ store = new Store("./" + revision + "/");
+ update();
+ System.out.println("Finished after " + (System.currentTimeMillis() - start) + " milliseconds.");
+ }
+
+ /**
+ * Checks if the region is valid.
+ * @param regionId The region id.
+ * @param xtea The XTEA keys used to decrypt the landscape.
+ * @param store The store.
+ * @return {@code True} if so.
+ */
+ public static boolean isValid(int regionId, int[] xtea, Store store) {
+ int index = getArchiveIndex("l", regionId, store);
+ if (index > -1) {
+ byte[] bs = store.getIndexes()[MAP_INDEX].getFile(index, 0, xtea);
+ if (bs == null) {
+ if (regionId == 11082) { //Elf city is an empty region
+ return true;
+ }
+ ByteBuffer buffer = ByteBuffer.wrap(store.getIndexes()[5].getFile(getArchiveIndex("m", regionId, store), 0));
+ byte[][][] mapscape = new byte[4][64][64];
+ boolean ocean = true;
+ main: for (int z = 0; z < 4; z++) {
+ for (int i = 0; i < 64; i++) {
+ for (int j = 0; j < 64; j++) {
+ while (true) {
+ int value = buffer.get() & 0xFF;
+ if (value == 0) {
+ break;
+ }
+ if (value == 1) {
+ buffer.get();
+ break;
+ }
+ if (value <= 49) {
+ int overlay = buffer.get() & 0xFF;
+ OverlayDefinition def = OverlayDefinition.forId(store, overlay);
+ if (def != null && def.getTextureId() != 25) {
+ ocean = false;
+ break main;
+ }
+ } else if (value <= 81) {
+ mapscape[z][i][j] = (byte) (value - 49);
+ }
+ }
+ }
+ }
+ }
+ if (!ocean) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Debugs the world map.
+ */
+ static void debugWorldMap() {
+ int regions = 0;
+ int missing = 0;
+ for (int x = 0; x < 255; x++) {
+ for (int y = 0; y < 255; y++) {
+ int regionId = x << 8 | y;
+ regions++;
+ if (!isValid(regionId, XTEA_KEYS, store)) {
+ missing++;
+ System.out.println("Missing region " + regionId + "!");
+ }
+ }
+ }
+ System.out.println("World map is missing " + missing + "/" + regions + " regions!");
+ }
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/ModelPacker.java b/Tools/Cache Editor/src/emperor/ModelPacker.java
new file mode 100644
index 000000000..eb55bd2c5
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/ModelPacker.java
@@ -0,0 +1,195 @@
+package emperor;
+
+import java.awt.image.BufferedImage;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import javax.imageio.ImageIO;
+
+import alex.cache.loaders.OverlayDefinition;
+
+import com.alex.loaders.images.IndexedColorImageFile;
+import com.alex.store.Store;
+
+/**
+ * Packs the models.
+ * @author Emperor
+ *
+ */
+public final class ModelPacker {
+
+ public static void main(String...args) throws Throwable {
+ Store to = new Store("./498/");
+ packDonatorIcons(to);
+ // packObjectDefinitions(from, to);
+ // packAnimations(from, to);
+ // List anims = new ArrayList<>();
+ // for (int i = 0; i < 50_000; i++) {
+ // byte[] data = from.getIndexes()[16].getFile(i >>> 1998118472, i & 0xff);
+ // if (data == null) {
+ // continue;
+ // }
+ // ObjectDefinitions def = new ObjectDefinitions(i);
+ // def.initialize(from);
+ // if (def.animationId > -1) {
+ // if (!anims.contains(def.animationId)) {
+ // anims.add(def.animationId);
+ // }
+ //// System.out.println(def.getName() + " anim: " + def.animationId + ", " + Arrays.toString(def.models));
+ // }
+ // }
+ // System.out.println(Arrays.toString(anims.toArray()));
+ // packAnimations(from, to);
+ }
+
+ static void packObjectDefinitions(Store from, Store to) {
+ int[] defs = new int[] { 5461 };//5099, 5100, 5094, 5096, 5098, 5097, 5110, 5111};//5088, 5089, 5090 };
+ for (int id : defs) {
+ int archive = id >>> 8; // same as the original "id >>> 1998118472": Java masks int shift counts to 5 bits and 1998118472 & 31 == 8
+ int file = id & 0xFF;
+ byte[] bs = from.getIndexes()[16].getFile(archive, file);
+ to.getIndexes()[16].putFile(archive, file, bs);
+ }
+ }
+
+ static void editObjectDefinitions(int itemId, Store store, int opcode, Object value) {
+ int archive = itemId >>> 8; // 1998118472 & 31 == 8, so this matches the original obfuscated shift
+ int file = itemId & 0xFF;
+ byte[] bs = store.getIndexes()[16].getFile(archive, file);
+ ByteBuffer buffer = ByteBuffer.allocate(bs.length + 128);
+ for (int i = 0; i < bs.length - 1; i++) {
+ buffer.put(bs[i]);
+ }
+ buffer.put((byte) opcode);
+ if (value instanceof Byte) {
+ buffer.put((Byte) value);
+ }
+ else if (value instanceof Short) {
+ buffer.putShort((Short) value);
+ }
+ else if (value instanceof Integer) {
+ buffer.putInt((Integer) value);
+ }
+ else if (value instanceof Long) {
+ buffer.putLong((Long) value);
+ }
+ else if (value instanceof String) {
+ buffer.put(((String) value).getBytes()).put((byte) 0);
+ }
+ else if (value instanceof Boolean) {
+ buffer.put((byte) ((Boolean) value ? 1 : 0));
+ }
+ buffer.put((byte) 0); // re-append the opcode-0 terminator the copy loop above dropped (assumes the config blob ends with opcode 0)
+ buffer.flip(); // switch to read mode; without this, remaining() would cover the unwritten capacity
+ bs = new byte[buffer.remaining()];
+ buffer.get(bs);
+ store.getIndexes()[16].putFile(archive, file, bs);
+ }
+
+ static void packSprite(Store to) {
+ int id = 423;
+ IndexedColorImageFile f = null;
+ try {
+ f = new IndexedColorImageFile(to, id, 0);
+ BufferedImage icon = ImageIO.read(new File("green.png"));
+ f.replaceImage(icon, 3);
+ //System.out.println("Added icon: "+f.addImage(icon, 0, 1)+".");
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ to.getIndexes()[8].putFile(id, 0, f.encodeFile());
+ }
+
+ static void packDonatorIcons(Store to) {
+ int id = 423;
+ File[] files = new File("donator_icons").listFiles();
+ IndexedColorImageFile f = null;
+ int index = 0;
+ for (File file : files) {
+ try {
+ f = new IndexedColorImageFile(to, id, 0);
+ BufferedImage icon = ImageIO.read(file);
+ if (index == 0) {
+ f.replaceImage(icon, 3);
+ System.out.println("Replaced icon - " + 3);
+ } else {
+ System.out.println("Added icon: "+f.addImage(icon, 0, 1)+".");
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ to.getIndexes()[8].putFile(id, 0, f.encodeFile());
+ index++;
+ }
+ }
+
+ static void packAnimations(Store from, Store to) {
+ int [] anims = new int[] {4856};//3206, 498, 499, 500, 501, 481, 467, 526, 527, 907, 505, 524, 449, 523, 2709, 1726, 480, 488, 479, 469, 475, 476, 473, 1071, 493, 494, 504, 471, 468, 470, 332, 333, 492, 1731, 472, 491, 503, 522, 456, 464, 2714, 9101, 502, 525, 6023, 6561, 477, 478, 1223, 446, 6913, 912, 917, 474, 1051, 1049, 4860, 1052, 1073, 9123, 3106, 1072, 1096, 1098, 1097, 1103, 1104, 1108, 1112, 1127, 1138, 1211, 1212, 1216, 1233, 1234, 1235, 1231, 1260, 1261, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 8618, 1334, 1433, 1347, 1349, 1348, 1362, 1355, 1416, 1398, 1411, 1430, 1431, 1532, 1533, 1600, 1631, 1629, 1632, 1630, 1636, 1657, 1641, 1642, 1643, 1729, 1733, 1727, 1017, 1734, 1730, 1732, 1747, 1812, 1845, 1846, 1847, 1869, 1868, 1875, 1881, 1908, 1909, 1923, 1915, 1943, 1940, 1936, 1937, 1938, 1939, 1944, 1998, 1999, 2056, 2054, 2091, 2131, 2133, 2136, 2135, 2137, 2178, 2260, 2174, 2173, 2199, 2198, 2196, 2201, 2203, 2204, 2209, 2210, 2212, 5855, 2313, 2331, 2346, 2359, 2360, 2349, 2350, 2348, 2379, 2440, 2439, 2437, 2451, 2564, 4291, 3743, 2598, 4123, 2600, 2641, 2657, 2699, 2708, 2734, 2746, 2747, 2742, 2743, 2744, 2768, 447, 2807, 2870, 2871, 2878, 2883, 2897, 2899, 2901, 2898, 2900, 2905, 2997, 3022, 3029, 3028, 3030, 3038, 3070, 3099, 3100, 3095, 3105, 3107, 3101, 3097, 3104, 3113, 3174, 3173, 3180, 4354, 3217, 3218, 3219, 3172, 3230, 3231, 3237, 3246, 3247, 3264, 6094, 3304, 3305, 3306, 3343, 3347, 3511, 7263, 3349, 3351, 3352, 3408, 3405, 3406, 3407, 3438, 3439, 3440, 3445, 3472, 3528, 3534, 3529, 3530, 3531, 3532, 3478, 3542, 3558, 4477, 4564, 3586, 3573, 3577, 3117, 3118, 5483, 166, 286, 145, 3707, 6218, 6496, 1338, 9241, 3587, 3582, 3647, 3720, 3644, 3646, 3648, 3578, 3579, 3580, 3581, 3615, 3616, 3742, 3835, 3843, 3927, 3932, 3939, 3940, 3943, 3944, 3976, 3998, 4005, 4006, 4015, 4013, 4014, 4022, 3699, 3698, 3700, 4133, 4132, 6477, 4126, 4157, 4161, 4163, 4220, 4217, 4218, 4239, 4260, 4241, 4240, 4242, 4274, 4284, 4308, 4309, 4323, 4324, 4325, 4338, 4339, 4336, 4335, 459, 4357, 4355, 4356, 4358, 4393, 4392, 4408, 4394, 4395, 4396, 4397, 4398, 4377, 4359, 4361, 4360, 4363, 4364, 4399, 4431, 4535, 4565, 4566, 4567, 4568, 4569, 4577, 4557, 4559, 4560, 4563, 4561, 4562, 4572, 4571, 4595, 4599, 4621, 4622, 4627, 4628, 4746, 4747, 4778, 4744, 4745, 4743, 4781, 4798, 4783, 4894, 4879, 4880, 4881, 4895, 4896, 4899, 4883, 4897, 4898, 4900, 4901, 5012, 5044, 5073, 5058, 5141, 5109, 5170, 5169, 5173, 5174, 5175, 5179, 5180, 5176, 5177, 5178, 5197, 5193, 5195, 5196, 5194, 5203, 5220, 5219, 5222, 5221, 5235, 5237, 5239, 5260, 5261, 5267, 5269, 5268, 5271, 5270, 5278, 5308, 5295, 5296, 5297, 5350, 5351, 5360, 5068, 5430, 5415, 5422, 5423, 5429, 5431, 5432, 5603, 5599, 5601, 5600, 5598, 5605, 5631, 5604, 5564, 5740, 5742, 5737, 5745, 5743, 5744, 5738, 5728, 5730, 5729, 5739, 5741, 5734, 5771, 5772, 5768, 5797, 5798, 5828, 5829, 5830, 5844, 5824, 5825, 5847, 5900, 5901, 5906, 5874, 5857, 5909, 5975, 5976, 5977, 5983, 5984, 5985, 5974, 6015, 6069, 6037, 6038, 6036, 6035, 6034, 6029, 6031, 6032, 6027, 6028, 6024, 6025, 6026, 6039, 6123, 6211, 6161, 6162, 6163, 6164, 6165, 6166, 6167, 6168, 6170, 6196, 6269, 6274, 6481, 4130, 4128, 4131, 4129, 6491, 4127, 4125, 6495, 6493, 6494, 6466, 6467, 6492, 4124, 6426, 6461, 6453, 6439, 6522, 6497, 6499, 6500, 6509, 6506, 6523, 6623, 6624, 6625, 6626, 6627, 6597, 6598, 6635, 6637, 6636, 6638, 6639, 6645, 6646, 6652, 6653, 6656, 6737, 6732, 6731, 6733, 6734, 6735, 6736, 6854, 6853, 6873, 6874, 6875, 6912, 6914, 6915, 
+ 4782, 6925, 6900, 6901, 6902, 6903, 6917, 6898, 6890, 6891, 6892, 6893, 6894, 6895, 6931, 6932, 6982, 6995, 6996, 7007, 7066, 7067, 7087, 7097, 7118, 7115, 7117, 7120, 7138, 7144, 7146, 7152, 7225, 7226, 7245, 7252, 7231, 7291, 7286, 7283, 7284, 7285, 7352, 7354, 7353, 7361, 7346, 7357, 7356, 7358, 7360, 7375, 7373, 7378, 7379, 7380, 7381, 7544, 7546, 7552, 7577, 7603, 7601, 7580, 7602, 7600, 8526, 8510, 8663, 8664, 8666, 8665, 8653, 8624, 8646, 8654, 8647, 8667, 2418, 8708, 8714, 8735, 7158, 808, 8881, 8845, 8857, 8894, 8892, 8897, 8967, 8968, 8969, 8970, 8972, 8971, 9005, 9011, 9007, 9010, 9008, 9090, 9088, 9089, 9085, 9083, 9084, 9033, 9035, 9036, 9041, 9135, 9122, 4290, 4295, 4296, 9137, 9143, 9144, 9150, 9146, 4297, 9154, 9199, 9303, 9348, 9329, 9330, 9347};
+ for (int i : anims) {
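+ // An animation id encodes its cache location: archive = id >>> 7, file = id & 0x7F.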
+ byte[] a = from.getIndexes()[20].getFile(i >>> 7, i & 0x7F);
+ if (a == null) {
+ continue;
+ }
+ // i = 10222;//from.getIndexes()[20].getLastArchiveId() + 1;
+ System.out.println("Packed animation " + i + " - " + to.getIndexes()[20].putFile(i >>> 7, i & 0x7F, a));
+ }
+ }
+
+ static void packTextures(Store from, Store to) {
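+ // Texture data lives in index 9, archive 0, one file per texture id.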
+ for (int i = 0; i < from.getIndexes()[9].getValidFilesCount(0); i++) {
+ byte[] bs = from.getIndexes()[9].getFile(0, i);
+ if (bs == null || bs.length < 1) {
+ System.out.println("Missing texture id " + i);
+ continue;
+ }
+ System.out.println("Packing texture id " + i + ": " + to.getIndexes()[9].putFile(0, i, bs));//+ (i < 200 ? Arrays.toString(bs) : null));//to.getIndexes()[6].putFile(0, i, bs));
+ }
+ }
+
+ /**
+ * Packs the overlays. (The putFile call is currently commented out, so this
+ * only reports overlay definitions that use a texture.)
+ * @param from The cache to get the data from.
+ * @param to The cache to store the data in.
+ */
+ static void packOverlays(Store from, Store to) {
+ System.out.println("Start");
+ // int changeOverlay = 135;
+ // int newOverlay = 135;
+ // System.out.println("Success = " + to.getIndexes()[2].putFile(4, changeOverlay, from.getIndexes()[2].getFile(4, newOverlay)));
+ for (int id = 0; id < to.getIndexes()[2].getValidFilesCount(4); id++) {
+ byte[] bs = to.getIndexes()[2].getFile(4, id);
+ if (bs == null || bs.length < 1) {
+ continue;
+ }
+ OverlayDefinition def = OverlayDefinition.forId(to, id);
+ if (def.getTextureId() > 0) {
+ System.out.println("Packed overlay definition " + id + " - texture=" + def.getTextureId() + "!");
+ // boolean success = to.getIndexes()[2].putFile(4, id, from.getIndexes()[2].getFile(4, id));
+ // System.out.println("Packed overlay definition " + id + " - success=" + success);
+ }
+ }
+ }
+
+ static void packModels(Store from, Store to) {
+ int[] models = new int[] {16400};
+ for (int model : models) {
+ byte[] a = from.getIndexes()[7].getFile(model);
+ if (a == null) {
+ continue;
+ }
+ System.out.println(Arrays.toString(a) + "");
+ to.getIndexes()[7].putFile(1046, 0, a);
+ break;
+ }
+ }
+
+ static void packMusic(Store from, Store to) throws Throwable {
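+ // NOTE: the putArchive call below is commented out, so this currently only lists the music file ids.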
+ for (int i = 0; i < to.getIndexes()[6].getValidArchivesCount(); i++) {
+ byte[] bs = to.getIndexes()[6].getFile(i);
+ if (bs == null || bs.length < 1) {
+ continue;
+ }
+ System.out.println("Packing music id " + i + ": ");// + to.getIndexes()[6].putArchive(i, from));//.putArchive(2, , from));
+ }
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/emperor/MusicPropertiesPacker.java b/Tools/Cache Editor/src/emperor/MusicPropertiesPacker.java
new file mode 100644
index 000000000..cf9de3fcb
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/MusicPropertiesPacker.java
@@ -0,0 +1,406 @@
+package emperor;
+
+import com.alex.loaders.clientscripts.CS2Mapping;
+import com.alex.store.Store;
+
+
+public final class MusicPropertiesPacker {
+
+ /**
+ * Configures the music data.
+ */
+ private static void configureMusic() {
+ //Don't remove anything from this!
+ //Also make sure zones don't overlap!
+ add(40, "brew hoo hoo!", 338, (14747));
+ add(53, "chef surprise", 399, (7507));
+ add(76, "davy jones' locker", 394, (11924));
+ add(102, "etceteria", 227, (10300));
+ add(156, "hells bells", 254, (11066));
+ add(175, "jolly-r", 65, (11058));
+ add(189, "land of the dwarves", 310, (11423));
+ add(205, "mad eadgar", 213, (11677));
+ add(254, "pharaoh's tomb", 355, (13356), (12105));
+ add(259, "pirates of peril", 262, (12093));
+ add(304, "spirits of elid", 331, (13461));
+ add(318, "subterranea", 362, (10142));
+ add(322, "tale of keldagrim", 309, (11678));
+ add(323, "talking forest", 119, (10550));
+ add(328, "cellar dwellers", 342, (10135));
+ add(329, "chosen", 324, (9805));
+ add(330, "desert", 120, (12591));
+ add(331, "desolate isle", 330, (10042));
+ add(333, "far side", 314, (12111));
+ add(335, "genie", 332, (13457));
+ add(336, "the golem", 295, (13616), (13872));
+ add(338, "the lost melody", 315, (13206));
+ add(341, "the mad mole", 393, (6992));
+ add(342, "monsters below", 329, (9886));
+ add(343, "the navigator", 255, (10652));
+ add(345, "other side", 278, (14646));
+ add(347, "quiz master", 318, (7754));
+ add(348, "rogues' den", 313, (11853), (12109));
+ add(349, "shadow", 121, (11314));
+ add(350, "the slayer", 269, (11164));
+ add(351, "terrible tower", 267, (13623));
+ add(352, "tower", 122, (10292), (10136));
+ add(360, "tomorrow", 163, (12081));
+ add(361, "too many cooks...", 398, (11930));
+ add(413, "zogre dance", 306, (9775));
+ add(2, "adventure", 0, (12854));
+ add(5, "alone", 2, (12086), (10134));
+ add(6, "ambient jungle", 3, (11310));
+ add(7, "anywhere", 240, (10795));
+ add(11, "arabique", 7, (11417));
+ add(12, "army of darkness", 8, (12088));
+ add(13, "arrival", 9, (11572));
+ add(14, "artistry", 200, (8010));
+ add(15, "attack 1", 10, (10034));
+ add(16, "attack 2", 11, (11414));
+ add(17, "attack 3", 12, (12192));
+ add(18, "attack 4", 13, (10289), (10389));
+ add(19, "attack 5", 14, (9033));
+ add(20, "attack 6", 15, (10387));
+ add(21, "attention", 16, (11825));
+ add(22, "autumn voyage", 17, (12851));
+ add(23, "aye car rum ba", 351, (8527));
+ add(24, "aztec", 201, (11157));
+ add(25, "background", 18, (11060), (7758));
+ add(26, "ballad of enchantment", 19, (10290));
+ add(27, "bandit camp", 214, (12590));
+ add(28, "barbarianism", 257, (12341), (12441));
+ add(29, "barking mad", 274, (14234));
+ add(30, "baroque", 20, (10547));
+ add(31, "beyond", 21, (11418), (11419));
+ add(32, "big chords", 22, (10032));
+ add(33, "blistering barnacles", 352, (8528));
+ add(34, "body parts", 270, (13979));
+ add(35, "bone dance", 183, (13619));
+ add(36, "bone dry", 216, (12946));
+ add(37, "book of spells", 23, (12593));
+ add(38, "borderland", 233, (10809));
+ add(39, "breeze", 194, (9010));
+ add(42, "bubble and squeak", 347, (7753));
+ add(43, "camelot", 24, (11062));
+ add(44, "castlewars", 247, (9520));
+ add(45, "catch me if you can", 344, (10646));
+ add(46, "cave background", 25, (12184), (11929));
+ add(47, "cave of beasts", 280, (11165));
+ add(48, "cave of the goblins", 304, (12693));
+ add(49, "cavern", 26, (12193), (10388));
+ add(50, "cellar song", 173, (12697));
+ add(51, "chain of command", 27, (10648), (10905));
+ add(52, "chamber", 225, (10821), (11078));
+ add(54, "chickened out", 395, (9796));
+ add(55, "chompy hunt", 178, (10542), (10642));
+ add(56, "city of the dead", 300, (12843), (13099));
+ add(57, "claustrophobia", 291, (9293));
+ add(58, "close quarters", 175, (12602));
+ add(59, "competition", 217, (8781));
+ add(60, "complication", 258, (9035));
+ add(61, "contest", 208, (11576));
+ add(62, "corporal punishment", 323, (12619));
+ add(64, "courage", 260, (11673));
+ add(65, "crystal castle", 210, (9011));
+ add(66, "crystal cave", 28, (9797));
+ add(67, "crystal sword", 29, (12855), (10647));
+ add(68, "cursed", 186, (9623));
+ add(69, "dagannoth dawn", 365, (7236), (7748));
+ add(71, "dance of the undead", 298, (14131));
+ add(72, "dangerous road", 263, (11413));
+ add(73, "dangerous way", 299, (14231));
+ add(74, "dangerous", 30, (12343), (13115));
+ add(75, "dark", 31, (13113));
+ add(77, "dead can dance", 341, (12601));
+ add(78, "dead quiet", 181, (13621), (9294));
+ add(79, "deadlands", 230, (14134));
+ add(80, "deep down", 224, (10823), (10822));
+ add(81, "deep wildy", 32, (11835));
+ add(82, "desert heat", 333, (13614));
+ add(83, "desert voyage", 33, (13102), (13359));
+ add(84, "diango's little helpers", 371, (8005));
+ add(86, "distant land", 353, (13873));
+ add(89, "doorways", 34, (12598));
+ add(90, "down below", 284, (12438));
+ add(91, "down to earth", 259, (10571));
+ add(92, "dragontooth island", 281, (15159));
+ add(93, "dream", 35, (12594));
+ add(95, "dunjun", 36, (11672));
+ add(96, "dynasty", 275, (13358));
+ add(98, "elven mist", 202, (9266));
+ add(99, "emotion", 38, (10033), (10309), (10133));
+ add(100, "emperor", 39, (11570), (11670));
+ add(101, "escape", 176, (10903));
+ add(103, "everlasting fire", 417, (13373));
+ add(104, "everywhere", 219, (8499));
+ add(105, "evil bob's island", 316, (10058));
+ add(106, "expanse", 40, (12605), (12852), (12952));
+ add(107, "expecting", 41, (9778), (9878));
+ add(108, "expedition", 42, (11676));
+ add(109, "exposed", 220, (8752));
+ add(110, "faerie", 43, (9540));
+ add(111, "faithless", 265, (12856));
+ add(112, "fanfare", 44, (11828));
+ add(113, "fanfare 2", 162, (11823));
+ add(114, "fanfare 3", 45, (10545));
+ add(116, "far away", 292, (9265));
+ add(118, "fenkenstrain's refrain", 271, (13879));
+ add(119, "fight or flight", 293, (7752));
+ add(120, "find my way", 246, (10894));
+ add(121, "fire and brimstone", 334, (9552));
+ add(122, "fishing", 46, (11317));
+ add(123, "flute salad", 47, (12595));
+ add(125, "forbidden", 185, (13111));
+ add(126, "forest", 203, (9009));
+ add(127, "forever", 48, (12342), (12442));
+ add(130, "frogland", 336, (9802));
+ add(131, "frostbite", 236, (11323));
+ add(132, "fruits de mer", 273, (11059));
+ add(133, "funny bunnies", 406, (9810));
+ add(134, "gaol", 49, (12090), (10031), (10131));
+ add(135, "garden", 50, (12853));
+ add(136, "gnome king", 51, (9782));
+ add(138, "gnome village", 53, (9781));
+ add(139, "gnome village 2", 54, (9269));
+ add(141, "gnomeball", 56, (9270));
+ add(142, "goblin game", 252, (10393));
+ add(144, "greatness", 57, (12596));
+ add(146, "grotto", 198, (13720));
+ add(148, "grumpy", 177, (10286));
+ add(151, "harmony 2", 167, (12950));
+ add(152, "haunted mine", 222, (11077));
+ add(153, "have a blast", 325, (7757));
+ add(155, "heart and mind", 174, (10059));
+ add(157, "hermit", 191, (9034));
+ add(158, "high seas", 59, (11057));
+ add(159, "horizon", 60, (11573));
+ add(161, "iban", 61, (8519));
+ add(162, "ice melody", 165, (11318));
+ add(163, "in between", 290, (10061));
+ add(164, "in the brine", 370, (14638));
+ add(165, "in the clink", 360, (8261));
+ add(166, "in the manor", 62, (10287));
+ add(167, "in the pits", 335, (9808));
+ add(169, "insect queen", 212, (13972));
+ add(170, "inspiration", 63, (12087));
+ add(171, "into the abyss", 317, (12107));
+ add(172, "intrepid", 64, (9369));
+ add(173, "island life", 242, (10794));
+ add(176, "jungle island", 66, (11313), (11309));
+ add(177, "jungle troubles", 343, (11568));
+ add(178, "jungly 1", 67, (11054), (11154));
+ add(179, "jungly 2", 68, (10802));
+ add(180, "jungly 3", 69, (11055));
+ add(182, "kingdom", 190, (11319));
+ add(183, "knightly", 70, (10291));
+ add(184, "la mort", 192, (8779));
+ add(185, "lair", 229, (13975));
+ add(187, "lament", 381, (12433));
+ add(190, "landlubber", 169, (10801));
+ add(192, "lasting", 71, (10549));
+ add(193, "legend", 235, (10808));
+ add(194, "legion", 72, (12089), (10039));
+ add(196, "lighthouse", 251, (10040));
+ add(197, "lightness", 73, (12599));
+ add(198, "lightwalk", 74, (11061));
+ add(200, "lonesome", 149, (13203));
+ add(201, "long ago", 75, (10544));
+ add(202, "long way home", 76, (11826));
+ add(203, "lost soul", 204, (9008));
+ add(204, "lullaby", 77, (13365), (10551));
+ add(206, "mage arena", 78, (12349), (10057));
+ add(207, "magic dance", 79, (10288));
+ add(208, "magical journey", 80, (10805));
+ add(209, "making waves", 378, (9273), (9272));
+ add(211, "march", 81, (10036));
+ add(212, "marooned", 241, (11562), (12117));
+ add(213, "marzipan", 211, (11166), (11421));
+ add(214, "masquerade", 268, (10908));
+ add(216, "mausoleum", 184, (13722));
+ add(218, "medieval", 82, (13109));
+ add(219, "mellow", 83, (10293));
+ add(220, "melodrama", 248, (9776));
+ add(221, "meridian", 205, (8497));
+ add(223, "miles away", 84, (11571), (10569));
+ add(225, "miracle dance", 85, (11083));
+ add(226, "mirage", 303, (13199));
+ add(227, "miscellania", 226, (10044));
+ add(228, "monarch waltz", 86, (10807));
+ add(229, "monkey madness", 239, (11051));
+ add(230, "monster melee", 272, (12694));
+ add(231, "moody", 87, (12600), (9523));
+ add(232, "morytania", 180, (13622));
+ add(233, "mudskipper melody", 361, (11824));
+ add(234, "narnode's theme", 513, (9882));
+ add(235, "natural", 197, (13620), (9038));
+ add(236, "neverland", 88, (9780));
+ add(239, "nightfall", 90, (12861), (11827));
+ add(241, "no way out", 403, (13209), (12369), (12113));
+ add(242, "nomad", 171, (11056));
+ add(243, "null and void", 400, (10537));
+ add(245, "oriental", 91, (11666));
+ add(246, "out of the deep", 253, (10140));
+ add(247, "over to nardah", 328, (13613));
+ add(248, "overpass", 207, (9267));
+ add(249, "overture", 92, (10806));
+ add(250, "parade", 93, (13110));
+ add(251, "path of peril", 307, (10575));
+ add(253, "pest control", 401, (10536));
+ add(255, "phasmatys", 277, (14746));
+ add(256, "pheasant peasant", 321, (10314));
+ add(258, "principality", 188, (11575));
+ add(260, "quest", 94, (10315));
+ add(261, "rat a tat tat", 345, (11599));
+ add(262, "rat hunt", 349, (11343));
+ add(263, "ready for battle", 249, (9620));
+ add(264, "regal", 95, (13117));
+ add(265, "reggae", 96, (11565));
+ add(266, "reggae 2", 97, (11567));
+ add(267, "rellekka", 231, (10297));
+ add(269, "righteousness", 223, (9803));
+ add(270, "riverside", 98, (10803), (8496));
+ add(272, "romancing the crone", 264, (11068));
+ add(273, "romper chomper", 312, (9263));
+ add(274, "royale", 99, (11671));
+ add(275, "rune essence", 100, (11595));
+ add(276, "sad meadow", 101, (10035), (11081));
+ add(277, "saga", 232, (10296));
+ add(278, "sarcophagus", 283, (12945));
+ add(279, "sarim's vermin", 348, (11926));
+ add(280, "scape cave", 102, (12698), (12437));
+ add(283, "scape sad", 104, (13116));
+ add(286, "scape soft", 159, (11829));
+ add(287, "scape wild", 105, (12857), (12604));
+ add(288, "scarab", 282, (12589));
+ add(290, "sea shanty", 106, (11569));
+ add(289, "sea shanty 2", 107, (12082));
+ add(291, "serenade", 108, (9521));
+ add(292, "serene", 109, (11837), (11936), (11339));
+ add(293, "settlement", 279, (11065));
+ add(294, "shadowland", 228, (13618), (13875), (8526));
+ add(296, "shining", 160, (12858));
+ add(297, "shipwrecked", 276, (14391));
+ add(298, "showdown", 245, (10895));
+ add(300, "sojourn", 209, (11321));
+ add(301, "soundscape", 111, (9774));
+ add(302, "sphinx", 302, (13100));
+ add(303, "spirit", 112, (12597));
+ add(305, "splendour", 113, (11574));
+ add(306, "spooky jungle", 115, (11053), (11668));
+ add(307, "spooky", 114, (12340));
+ add(308, "spooky 2", 218, (13718));
+ add(309, "stagnant", 193, (13876), (8782));
+ add(310, "starlight", 116, (11925), (12949));
+ add(311, "start", 117, (12339));
+ add(312, "still night", 118, (13108));
+ add(313, "stillness", 250, (13977));
+ add(314, "stranded", 234, (11322));
+ add(316, "stratosphere", 195, (8523));
+ add(319, "sunburn", 215, (12846), (13357));
+ add(320, "superstition", 261, (11153));
+ add(324, "tears of guthix", 311, (12948));
+ add(325, "technology", 238, (10310));
+ add(326, "temple of light", 294, (7496));
+ add(327, "temple", 243, (11151));
+ add(353, "theme", 123, (10294), (10138));
+ add(355, "time out", 196, (11591));
+ add(356, "time to mine", 289, (11422));
+ add(357, "tiptoe", 266, (12440));
+// add(358, "title fight", 367, (12696));
+ add(362, "trawler minor", 125, (7755));
+ add(363, "trawler", 124, (7499));
+ add(364, "tree spirits", 126, (9268));
+ add(365, "tremble", 189, (11320));
+ add(367, "tribal background", 127, (11312), (11412));
+ add(368, "tribal", 128, (11311));
+ add(366, "tribal 2", 129, (11566));
+ add(369, "trinity", 130, (10804), (10904));
+ add(371, "troubled", 131, (11833));
+ add(372, "twilight", 179, (10906));
+ add(373, "tzhaar!", 339, (9551));
+ add(374, "undercurrent", 170, (12345));
+ add(376, "underground pass", 134, (9621));
+ add(375, "underground", 132, (13368), (11416));
+ add(377, "understanding", 187, (9547));
+ add(378, "unknown land", 133, (12338));
+ add(379, "upcoming", 135, (10546));
+ add(380, "venture", 136, (13364));
+ add(381, "venture 2", 168, (13464));
+ add(382, "victory is mine", 368, (12696));
+ add(383, "village", 182, (13878));
+ add(384, "vision", 137, (12337), (12436));
+ add(385, "voodoo cult", 138, (9545), (11665));
+ add(386, "voyage", 139, (10038));
+ add(388, "wander", 140, (12083));
+ add(389, "warrior", 237, (10653));
+ add(391, "waterfall", 141, (10037), (10137));
+ add(392, "waterlogged", 199, (13877), (8014));
+ add(394, "wayward", 308, (9875));
+ add(396, "well of voyage", 221, (9366));
+ add(397, "wild side", 340, (12092));
+ add(398, "wilderness", 142, (11832), (12346));
+ add(399, "wilderness 2", 143, (12091));
+ add(400, "wilderness 3", 144, (11834));
+ add(401, "wildwood", 256, (12344));
+ add(402, "witching", 145, (13114));
+ add(403, "woe of the wyvern", 369, (12181));
+ add(405, "wonder", 146, (11831));
+ add(406, "wonderous", 147, (10548));
+ add(407, "woodland", 206, (8498));
+ add(408, "workshop", 148, (12084));
+ add(410, "xenophobe", 366, (7492), (11589));
+ add(411, "yesteryear", 161, (12849));
+ add(412, "zealot", 172, (10827));
+
+ //Al kharid/desert
+ add(3, "al kharid", 1, (13105), (13361));
+ add(8, "arabian2", 5, (13107));
+ add(9, "arabian3", 6, (12848));
+ add(10, "arabian", 4, (13106), (13617));
+ add(94, "duel arena", 164, (13362));
+ add(295, "shine", 110, (13363));
+ add(97, "egypt", 37, (13104));
+ //Brimhaven
+ add(1, "7th realm", 285, (10645), (10644));
+ add(181, "karamja jam", 286, (10900), (10899));
+ add(252, "pathways", 287, (10901));
+ //Tutorial island
+// add(237, "newbie melody", 89, new ZoneBorders(3052, 3055, 3155, 3135));
+ //Lumbridge
+ add(150, "harmony", 58, (12850));
+ }
+
+ static CS2Mapping indexes;
+ static CS2Mapping ids;
+ /**
+ * Adds a new music entry.
+ * @param musicId The music id.
+ * @param name The song name.
+ * @param index The list index.
+ * @param regions The ids of the regions the song plays in.
+ */
+ private static void add(int musicId, String name, int index, int... regions) {
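+ // Regenerates add(...) source lines from the CS2 mappings; musicId and name are unused here.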
+ String n = (String) indexes.getMap().get(index);
+ System.out.print("add(" + ids.getMap().get(index) + ", \"" + n + "\", " + index);
+ for (int id : regions) {
+ System.out.print(", forRegion(" + id + ")");
+ }
+ System.out.println(");");
+ }
+
+ /**
+ * The main method.
+ * @param args The command-line arguments.
+ * @throws Throwable When an exception occurs.
+ */
+ public static void main(String[] args) throws Throwable {
+ Store store = new Store("./666/");
+ indexes = CS2Mapping.forId(1345, store);
+ ids = CS2Mapping.forId(1351, store);
+ configureMusic();
+ }
+}
diff --git a/Tools/Cache Editor/src/emperor/ObjectMap.java b/Tools/Cache Editor/src/emperor/ObjectMap.java
new file mode 100644
index 000000000..d7ad5c455
--- /dev/null
+++ b/Tools/Cache Editor/src/emperor/ObjectMap.java
@@ -0,0 +1,207 @@
+package emperor;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.Queue;
+
+import com.alex.io.InputStream;
+import com.alex.io.OutputStream;
+
+/**
+ * Represents an object map.
+ * @author Emperor
+ *
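+ * A minimal round-trip sketch (illustrative only; assumes com.alex.io.InputStream
+ * can wrap a raw byte array and that landscapeData holds a decoded landscape file):
+ * <pre>
+ * ObjectMap map = new ObjectMap();
+ * map.map(new InputStream(landscapeData)); // decode
+ * byte[] reencoded = map.generate();       // encode it back
+ * </pre>
+ *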
+ */
+public final class ObjectMap {
+
+ private List<GameObject> objects = new ArrayList<>();
+
+ public void add(int id, int x, int y, int z, int type, int rotation) {
+ objects.add(new GameObject(id, x, y, z, type, rotation));
+ }
+
+ public GameObject get(GameObject object) {
+ return get(object.id, object.loc.x, object.loc.y, object.loc.z, object.type, object.rotation);
+ }
+
+ public GameObject get(int id, int x, int y, int z, int type, int rotation) {
+ for (GameObject object : objects) {
+ Location loc = object.loc;
+ if (object.id == id && loc.x == x && loc.y == y && loc.z == z && object.type == type && object.rotation == rotation) {
+ return object;
+ }
+ }
+ return null;
+ }
+
+ public List<GameObject> getObjects() {
+ return objects;
+ }
+
+ public static void compare(ObjectMap map, ObjectMap m) {
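+ // Sanity check: reports the first difference between two object maps, comparing objects grouped by id.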
+ if (map.objects.size() != m.objects.size()) {
+ System.err.println("Mismatch [s1=" + map.objects.size() + ", s2=" + m.objects.size() + "]!");
+ return;
+ }
+ Queue<GameObject> queue1 = new PriorityQueue<>(map.objects);
+ Queue<GameObject> queue2 = new PriorityQueue<>(m.objects);
+ while (!queue1.isEmpty()) {
+ int id = queue1.peek().id;
+ int id1 = queue2.peek().id;
+ if (id != id1) {
+ System.err.println("Object id mismatch [o1=" + id + ", o2=" + id1 + "]!");
+ return;
+ }
+ Queue<QueueEntry> entry = new PriorityQueue<>();
+ Queue<QueueEntry> entry1 = new PriorityQueue<>();
+ while (!queue1.isEmpty() && (queue1.peek().id == id)) {
+ entry.add(new QueueEntry(queue1.poll()));
+ }
+ while (!queue2.isEmpty() && (queue2.peek().id == id)) {
+ entry1.add(new QueueEntry(queue2.poll()));
+ }
+ if (entry.size() != entry1.size()) {
+ System.err.println("Entry mismatch [s1=" + entry.size() + ", s2=" + entry1.size() + "]!");
+ return;
+ }
+ while (!entry.isEmpty()) {
+ GameObject object = entry.poll().object;
+ GameObject object1 = entry1.poll().object;
+ if (object.loc.getHash() != object1.loc.getHash()) {
+ System.err.println("Location mismatch " + id + "!");
+ return;
+ }
+ if (object.rotation != object1.rotation) {
+ System.err.println("Rotation mismatch " + id + "!");
+ return;
+ }
+ if (object.type != object1.type) {
+ System.err.println("Type mismatch " + id + "!");
+ return;
+ }
+ }
+ }
+ System.out.println("Matching object maps [s1=" + map.objects.size() + ", s2=" + m.objects.size() + "]!");
+ }
+
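+ /**
+ * Decodes an object map from the landscape file format: a delta-encoded
+ * object id (smart2), then per id a zero-terminated list of delta-encoded
+ * location hashes, each followed by a configuration byte (type << 2 | rotation).
+ * @param stream The stream over the decoded landscape file data.
+ */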
+ public void map(InputStream stream) {
+ int objectId = -1;
+ for (;;) {
+ int offset = stream.readSmart2();
+ if (offset == 0) {
+ break;
+ }
+ objectId += offset;
+ int location = 0;
+ for (;;) {
+ offset = stream.readUnsignedSmart();
+ if (offset == 0) {
+ break;
+ }
+ location += offset - 1;
+ int y = location & 0x3f;
+ int x = location >> 6 & 0x3f;
+ int configuration = stream.readUnsignedByte();
+ int rotation = configuration & 0x3;
+ int type = configuration >> 2;
+ int z = location >> 12;
+ if (x >= 0 && y >= 0 && x < 64 && y < 64) {
+ add(objectId, x, y, z, type, rotation);
+ } else {
+ System.out.println("Object out of bounds: " + objectId + " - " + x + ", " + y + ", " + z);
+ }
+ }
+ }
+ }
+
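+ /**
+ * Serializes the objects back into the format read by {@link #map(InputStream)}:
+ * ids in ascending order with delta-encoded ids and location hashes, each
+ * list terminated by a zero smart.
+ * @return The encoded landscape file data.
+ */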
+ public byte[] generate() {
+ OutputStream stream = new OutputStream();
+ PriorityQueue<GameObject> queue = new PriorityQueue<>(objects);
+ int offset = -1;
+ while (!queue.isEmpty()) {
+ int id = queue.peek().id;
+ Queue<QueueEntry> entry = new PriorityQueue<>();
+ while (!queue.isEmpty() && (queue.peek().id == id)) {
+ entry.add(new QueueEntry(queue.poll()));
+ }
+ stream.writeSmart2(id - offset);
+ int location = 0;
+ while (!entry.isEmpty()) {
+ GameObject object = entry.poll().object;
+ stream.writeSmart(1 + (object.loc.getHash() - location));
+ stream.writeByte(object.rotation | object.type << 2);
+ location = object.loc.getHash();
+ }
+ stream.writeSmart(0);
+ offset = id;
+ }
+ stream.writeSmart2(0);
+ byte[] bs = new byte[stream.getOffset()];
+ for (int i = 0; i < stream.getOffset(); i++) {
+ bs[i] = stream.getBuffer()[i];
+ }
+ return bs;
+ }
+
+ public static class GameObject implements Comparable<GameObject> {
+ int id;
+ Location loc;
+ int type;
+ int rotation;
+
+ public GameObject(int id, int x, int y, int z, int type, int rotation) {
+ this.id = id;
+ this.loc = new Location(x, y, z);
+ this.type = type;
+ this.rotation = rotation;
+ }
+
+ public GameObject getLocal() {
+ return new GameObject(id, loc.getRegionX(), loc.getRegionY(), loc.z, type, rotation);
+ }
+
+ @Override
+ public int compareTo(GameObject o) {
+ return id - o.id;
+ }
+
+ @Override
+ public String toString() {
+ return id + ", " + type + ", " + rotation;
+ }
+ }
+
+ public static class QueueEntry implements Comparable<QueueEntry> {
+ GameObject object;
+ public QueueEntry(GameObject object) {
+ this.object = object;
+ }
+
+ @Override
+ public int compareTo(QueueEntry o) {
+ return object.loc.getHash() - o.object.loc.getHash();
+ }
+ }
+ public static class Location {
+ int x;
+ int y;
+ int z;
+ public Location(int x, int y, int z) {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ public int getRegionX() {
+ return x - ((x >> 6) << 6);
+ }
+
+ public int getRegionY() {
+ return y - ((y >> 6) << 6);
+ }
+ public int getHash() {
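+ // Packs plane, x and y (6 bits each for x/y) into one sortable key; assumes local coordinates (0-63).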
+ return z << 12 | x << 6 | y;
+ }
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4BlockInputStream.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4BlockInputStream.java
new file mode 100644
index 000000000..09b266d87
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4BlockInputStream.java
@@ -0,0 +1,247 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_LEVEL_BASE;
+import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_LZ4;
+import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_RAW;
+import static net.jpountz.lz4.LZ4BlockOutputStream.DEFAULT_SEED;
+import static net.jpountz.lz4.LZ4BlockOutputStream.HEADER_LENGTH;
+import static net.jpountz.lz4.LZ4BlockOutputStream.MAGIC;
+import static net.jpountz.lz4.LZ4BlockOutputStream.MAGIC_LENGTH;
+
+import java.io.EOFException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.Checksum;
+
+import net.jpountz.util.Utils;
+import net.jpountz.xxhash.StreamingXXHash32;
+import net.jpountz.xxhash.XXHash32;
+import net.jpountz.xxhash.XXHashFactory;
+
+/**
+ * {@link InputStream} implementation to decode data written with
+ * {@link LZ4BlockOutputStream}. This class is not thread-safe and does not
+ * support {@link #mark(int)}/{@link #reset()}.
+ * @see LZ4BlockOutputStream
+ */
+public final class LZ4BlockInputStream extends FilterInputStream {
+
+ private final LZ4FastDecompressor decompressor;
+ private final Checksum checksum;
+ private byte[] buffer;
+ private byte[] compressedBuffer;
+ private int originalLen;
+ private int o;
+ private boolean finished;
+
+ /**
+ * Create a new {@link InputStream}.
+ *
+ * @param in the {@link InputStream} to poll
+ * @param decompressor the {@link LZ4FastDecompressor decompressor} instance to
+ * use
+ * @param checksum the {@link Checksum} instance to use, must be
+ * equivalent to the instance which has been used to
+ * write the stream
+ */
+ public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor, Checksum checksum) {
+ super(in);
+ this.decompressor = decompressor;
+ this.checksum = checksum;
+ this.buffer = new byte[0];
+ this.compressedBuffer = new byte[HEADER_LENGTH];
+ o = originalLen = 0;
+ finished = false;
+ }
+
+ /**
+ * Create a new instance using {@link XXHash32} for checksumming.
+ * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum)
+ * @see StreamingXXHash32#asChecksum()
+ */
+ public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor) {
+ this(in, decompressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum());
+ }
+
+ /**
+ * Create a new instance which uses the fastest {@link LZ4FastDecompressor} available.
+ * @see LZ4Factory#fastestInstance()
+ * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor)
+ */
+ public LZ4BlockInputStream(InputStream in) {
+ this(in, LZ4Factory.fastestInstance().fastDecompressor());
+ }
+
+ @Override
+ public int available() throws IOException {
+ return originalLen - o;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (finished) {
+ return -1;
+ }
+ if (o == originalLen) {
+ refill();
+ }
+ if (finished) {
+ return -1;
+ }
+ return buffer[o++] & 0xFF;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ Utils.checkRange(b, off, len);
+ if (finished) {
+ return -1;
+ }
+ if (o == originalLen) {
+ refill();
+ }
+ if (finished) {
+ return -1;
+ }
+ len = Math.min(len, originalLen - o);
+ System.arraycopy(buffer, o, b, off, len);
+ o += len;
+ return len;
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return read(b, 0, b.length);
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (finished) {
+ return -1;
+ }
+ if (o == originalLen) {
+ refill();
+ }
+ if (finished) {
+ return -1;
+ }
+ final int skipped = (int) Math.min(n, originalLen - o);
+ o += skipped;
+ return skipped;
+ }
+
+ private void refill() throws IOException {
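+ // Reads and validates the next block header, then decodes the block body into the internal buffer.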
+ readFully(compressedBuffer, HEADER_LENGTH);
+ for (int i = 0; i < MAGIC_LENGTH; ++i) {
+ if (compressedBuffer[i] != MAGIC[i]) {
+ throw new IOException("Stream is corrupted");
+ }
+ }
+ final int token = compressedBuffer[MAGIC_LENGTH] & 0xFF;
+ final int compressionMethod = token & 0xF0;
+ final int compressionLevel = COMPRESSION_LEVEL_BASE + (token & 0x0F);
+ if (compressionMethod != COMPRESSION_METHOD_RAW && compressionMethod != COMPRESSION_METHOD_LZ4) {
+ throw new IOException("Stream is corrupted");
+ }
+ final int compressedLen = Utils.readIntLE(compressedBuffer, MAGIC_LENGTH + 1);
+ originalLen = Utils.readIntLE(compressedBuffer, MAGIC_LENGTH + 5);
+ final int check = Utils.readIntLE(compressedBuffer, MAGIC_LENGTH + 9);
+ assert HEADER_LENGTH == MAGIC_LENGTH + 13;
+ if (originalLen > 1 << compressionLevel
+ || originalLen < 0
+ || compressedLen < 0
+ || (originalLen == 0 && compressedLen != 0)
+ || (originalLen != 0 && compressedLen == 0)
+ || (compressionMethod == COMPRESSION_METHOD_RAW && originalLen != compressedLen)) {
+ throw new IOException("Stream is corrupted");
+ }
+ if (originalLen == 0 && compressedLen == 0) {
+ if (check != 0) {
+ throw new IOException("Stream is corrupted");
+ }
+ finished = true;
+ return;
+ }
+ if (buffer.length < originalLen) {
+ buffer = new byte[Math.max(originalLen, buffer.length * 3 / 2)];
+ }
+ switch (compressionMethod) {
+ case COMPRESSION_METHOD_RAW:
+ readFully(buffer, originalLen);
+ break;
+ case COMPRESSION_METHOD_LZ4:
+ if (compressedBuffer.length < originalLen) {
+ compressedBuffer = new byte[Math.max(compressedLen, compressedBuffer.length * 3 / 2)];
+ }
+ readFully(compressedBuffer, compressedLen);
+ try {
+ final int compressedLen2 = decompressor.decompress(compressedBuffer, 0, buffer, 0, originalLen);
+ if (compressedLen != compressedLen2) {
+ throw new IOException("Stream is corrupted");
+ }
+ } catch (LZ4Exception e) {
+ throw new IOException("Stream is corrupted", e);
+ }
+ break;
+ default:
+ throw new AssertionError();
+ }
+ checksum.reset();
+ checksum.update(buffer, 0, originalLen);
+ if ((int) checksum.getValue() != check) {
+ throw new IOException("Stream is corrupted");
+ }
+ o = 0;
+ }
+
+ private void readFully(byte[] b, int len) throws IOException {
+ int read = 0;
+ while (read < len) {
+ final int r = in.read(b, read, len - read);
+ if (r < 0) {
+ throw new EOFException("Stream ended prematurely");
+ }
+ read += r;
+ }
+ assert len == read;
+ }
+
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+
+ @SuppressWarnings("sync-override")
+ @Override
+ public void mark(int readlimit) {
+ // unsupported
+ }
+
+ @SuppressWarnings("sync-override")
+ @Override
+ public void reset() throws IOException {
+ throw new IOException("mark/reset not supported");
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(in=" + in
+ + ", decompressor=" + decompressor + ", checksum=" + checksum + ")";
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4BlockOutputStream.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4BlockOutputStream.java
new file mode 100644
index 000000000..c0f0800dc
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4BlockOutputStream.java
@@ -0,0 +1,257 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.zip.Checksum;
+
+import net.jpountz.util.Utils;
+import net.jpountz.xxhash.StreamingXXHash32;
+import net.jpountz.xxhash.XXHashFactory;
+
+/**
+ * Streaming LZ4.
+ *
+ * This class compresses data into fixed-size blocks of compressed data.
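+ *
+ * A minimal round-trip sketch (illustrative only, not part of the original
+ * library source; {@code data} is any byte array to compress):
+ * <pre>
+ * ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+ * try (LZ4BlockOutputStream lz4 = new LZ4BlockOutputStream(bytes)) {
+ *     lz4.write(data);
+ * }
+ * LZ4BlockInputStream back = new LZ4BlockInputStream(
+ *         new ByteArrayInputStream(bytes.toByteArray()));
+ * </pre>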
+ * @see LZ4BlockInputStream
+ */
+public final class LZ4BlockOutputStream extends FilterOutputStream {
+
+ static final byte[] MAGIC = new byte[] { 'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k' };
+ static final int MAGIC_LENGTH = MAGIC.length;
+
+ static final int HEADER_LENGTH =
+ MAGIC_LENGTH // magic bytes
+ + 1 // token
+ + 4 // compressed length
+ + 4 // decompressed length
+ + 4; // checksum
+
+ static final int COMPRESSION_LEVEL_BASE = 10;
+ static final int MIN_BLOCK_SIZE = 64;
+ static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F);
+
+ static final int COMPRESSION_METHOD_RAW = 0x10;
+ static final int COMPRESSION_METHOD_LZ4 = 0x20;
+
+ static final int DEFAULT_SEED = 0x9747b28c;
+
+ private static int compressionLevel(int blockSize) {
+ if (blockSize < MIN_BLOCK_SIZE) {
+ throw new IllegalArgumentException("blockSize must be >= " + MIN_BLOCK_SIZE + ", got " + blockSize);
+ } else if (blockSize > MAX_BLOCK_SIZE) {
+ throw new IllegalArgumentException("blockSize must be <= " + MAX_BLOCK_SIZE + ", got " + blockSize);
+ }
+ int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2
+ assert (1 << compressionLevel) >= blockSize;
+ assert blockSize * 2 > (1 << compressionLevel);
+ compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE);
+ assert compressionLevel >= 0 && compressionLevel <= 0x0F;
+ return compressionLevel;
+ }
+
+ private final int blockSize;
+ private final int compressionLevel;
+ private final LZ4Compressor compressor;
+ private final Checksum checksum;
+ private final byte[] buffer;
+ private final byte[] compressedBuffer;
+ private final boolean syncFlush;
+ private boolean finished;
+ private int o;
+
+ /**
+ * Create a new {@link OutputStream} with configurable block size. Large
+ * blocks require more memory at compression and decompression time but
+ * should improve the compression ratio.
+ *
+ * @param out the {@link OutputStream} to feed
+ * @param blockSize the maximum number of bytes to try to compress at once,
+ * must be >= 64 and <= 32 M
+ * @param compressor the {@link LZ4Compressor} instance to use to compress
+ * data
+ * @param checksum the {@link Checksum} instance to use to check data for
+ * integrity.
+ * @param syncFlush true if pending data should also be flushed on {@link #flush()}
+ */
+ public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) {
+ super(out);
+ this.blockSize = blockSize;
+ this.compressor = compressor;
+ this.checksum = checksum;
+ this.compressionLevel = compressionLevel(blockSize);
+ this.buffer = new byte[blockSize];
+ final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize);
+ this.compressedBuffer = new byte[compressedBlockSize];
+ this.syncFlush = syncFlush;
+ o = 0;
+ finished = false;
+ System.arraycopy(MAGIC, 0, compressedBuffer, 0, MAGIC_LENGTH);
+ }
+
+ /**
+ * Create a new instance which checks stream integrity using
+ * {@link StreamingXXHash32} and doesn't sync flush.
+ * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean)
+ * @see StreamingXXHash32#asChecksum()
+ */
+ public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) {
+ this(out, blockSize, compressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), false);
+ }
+
+ /**
+ * Create a new instance which compresses with the standard LZ4 compression
+ * algorithm.
+ * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor)
+ * @see LZ4Factory#fastCompressor()
+ */
+ public LZ4BlockOutputStream(OutputStream out, int blockSize) {
+ this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor());
+ }
+
+ /**
+ * Create a new instance which compresses into blocks of 64 KB.
+ * @see #LZ4BlockOutputStream(OutputStream, int)
+ */
+ public LZ4BlockOutputStream(OutputStream out) {
+ this(out, 1 << 16);
+ }
+
+ private void ensureNotFinished() {
+ if (finished) {
+ throw new IllegalStateException("This stream is already closed");
+ }
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ ensureNotFinished();
+ if (o == blockSize) {
+ flushBufferedData();
+ }
+ buffer[o++] = (byte) b;
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ Utils.checkRange(b, off, len);
+ ensureNotFinished();
+
+ while (o + len > blockSize) {
+ final int l = blockSize - o;
+ System.arraycopy(b, off, buffer, o, blockSize - o);
+ o = blockSize;
+ flushBufferedData();
+ off += l;
+ len -= l;
+ }
+ System.arraycopy(b, off, buffer, o, len);
+ o += len;
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ ensureNotFinished();
+ write(b, 0, b.length);
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!finished) {
+ finish();
+ }
+ if (out != null) {
+ out.close();
+ out = null;
+ }
+ }
+
+ private void flushBufferedData() throws IOException {
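+ // Compresses the buffered data into one block and writes header + payload; stores the block raw if compression does not shrink it.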
+ if (o == 0) {
+ return;
+ }
+ checksum.reset();
+ checksum.update(buffer, 0, o);
+ final int check = (int) checksum.getValue();
+ int compressedLength = compressor.compress(buffer, 0, o, compressedBuffer, HEADER_LENGTH);
+ final int compressMethod;
+ if (compressedLength >= o) {
+ compressMethod = COMPRESSION_METHOD_RAW;
+ compressedLength = o;
+ System.arraycopy(buffer, 0, compressedBuffer, HEADER_LENGTH, o);
+ } else {
+ compressMethod = COMPRESSION_METHOD_LZ4;
+ }
+
+ compressedBuffer[MAGIC_LENGTH] = (byte) (compressMethod | compressionLevel);
+ writeIntLE(compressedLength, compressedBuffer, MAGIC_LENGTH + 1);
+ writeIntLE(o, compressedBuffer, MAGIC_LENGTH + 5);
+ writeIntLE(check, compressedBuffer, MAGIC_LENGTH + 9);
+ assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
+ out.write(compressedBuffer, 0, HEADER_LENGTH + compressedLength);
+ o = 0;
+ }
+
+ /**
+ * Flush this compressed {@link OutputStream}.
+ *
+ * If the stream has been created with syncFlush=true, pending
+ * data will be compressed and appended to the underlying {@link OutputStream}
+ * before calling {@link OutputStream#flush()} on the underlying stream.
+ * Otherwise, this method just flushes the underlying stream, so pending
+ * data might not be available for reading until {@link #finish()} or
+ * {@link #close()} is called.
+ */
+ @Override
+ public void flush() throws IOException {
+ if (syncFlush) {
+ flushBufferedData();
+ }
+ out.flush();
+ }
+
+ /**
+ * Same as {@link #close()} except that it doesn't close the underlying stream.
+ * This can be useful if you want to keep on using the underlying stream.
+ */
+ public void finish() throws IOException {
+ ensureNotFinished();
+ flushBufferedData();
+ compressedBuffer[MAGIC_LENGTH] = (byte) (COMPRESSION_METHOD_RAW | compressionLevel);
+ writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 1);
+ writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 5);
+ writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 9);
+ assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
+ out.write(compressedBuffer, 0, HEADER_LENGTH);
+ finished = true;
+ out.flush();
+ }
+
+ private static void writeIntLE(int i, byte[] buf, int off) {
+ buf[off++] = (byte) i;
+ buf[off++] = (byte) (i >>> 8);
+ buf[off++] = (byte) (i >>> 16);
+ buf[off++] = (byte) (i >>> 24);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(out=" + out + ", blockSize=" + blockSize
+ + ", compressor=" + compressor + ", checksum=" + checksum + ")";
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Compressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Compressor.java
new file mode 100644
index 000000000..53fc7764a
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Compressor.java
@@ -0,0 +1,98 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+/**
+ * LZ4 compressor.
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class LZ4Compressor {
+
+ /** Return the maximum compressed length for an input of size length. */
+ @SuppressWarnings("static-method")
+ public final int maxCompressedLength(int length) {
+ return LZ4Utils.maxCompressedLength(length);
+ }
+
+ /**
+ * Compress src[srcOff:srcOff+srcLen] into
+ * dest[destOff:destOff+destLen] and return the compressed
+ * length.
+ *
+ * This method will throw a {@link LZ4Exception} if this compressor is unable
+ * to compress the input into less than maxDestLen bytes. To
+ * prevent this exception from being thrown, you should make sure that
+ * maxDestLen >= maxCompressedLength(srcLen).
+ *
+ * @throws LZ4Exception if maxDestLen is too small
+ * @return the compressed size
+ */
+ public abstract int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #compress(byte[], int, int, byte[], int, int) compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
+ */
+ public final int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
+ return compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #compress(byte[], int, int, byte[], int) compress(src, 0, src.length, dest, 0)}.
+ */
+ public final int compress(byte[] src, byte[] dest) {
+ return compress(src, 0, src.length, dest, 0);
+ }
+
+ /**
+ * Convenience method which returns src[srcOff:srcOff+srcLen]
+ * compressed.
+ *
+ * Warning: this method has an
+ * important overhead due to the fact that it needs to allocate a buffer to
+ * compress into, and then needs to resize this buffer to the actual
+ * compressed length.
+ * Here is how this method is implemented:
+ *
+ * final int maxCompressedLength = maxCompressedLength(srcLen);
+ * final byte[] compressed = new byte[maxCompressedLength];
+ * final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
+ * return Arrays.copyOf(compressed, compressedLength);
+ *
+ */
+ public final byte[] compress(byte[] src, int srcOff, int srcLen) {
+ final int maxCompressedLength = maxCompressedLength(srcLen);
+ final byte[] compressed = new byte[maxCompressedLength];
+ final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
+ return Arrays.copyOf(compressed, compressedLength);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #compress(byte[], int, int) compress(src, 0, src.length)}.
+ */
+ public final byte[] compress(byte[] src) {
+ return compress(src, 0, src.length);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Constants.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Constants.java
new file mode 100644
index 000000000..6e642b63b
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Constants.java
@@ -0,0 +1,50 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+enum LZ4Constants {
+ ;
+
+ static final int MEMORY_USAGE = 14;
+ static final int NOT_COMPRESSIBLE_DETECTION_LEVEL = 6;
+
+ static final int MIN_MATCH = 4;
+
+ static final int HASH_LOG = MEMORY_USAGE - 2;
+ static final int HASH_TABLE_SIZE = 1 << HASH_LOG;
+
+ static final int SKIP_STRENGTH = Math.max(NOT_COMPRESSIBLE_DETECTION_LEVEL, 2);
+ static final int COPY_LENGTH = 8;
+ static final int LAST_LITERALS = 5;
+ static final int MF_LIMIT = COPY_LENGTH + MIN_MATCH;
+ static final int MIN_LENGTH = MF_LIMIT + 1;
+
+ static final int MAX_DISTANCE = 1 << 16;
+
+ static final int ML_BITS = 4;
+ static final int ML_MASK = (1 << ML_BITS) - 1;
+ static final int RUN_BITS = 8 - ML_BITS;
+ static final int RUN_MASK = (1 << RUN_BITS) - 1;
+
+ static final int LZ4_64K_LIMIT = (1 << 16) + (MF_LIMIT - 1);
+ static final int HASH_LOG_64K = HASH_LOG + 1;
+ static final int HASH_TABLE_SIZE_64K = 1 << HASH_LOG_64K;
+
+ static final int HASH_LOG_HC = 15;
+ static final int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC;
+ static final int OPTIMAL_ML = ML_MASK - 1 + MIN_MATCH;
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Decompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Decompressor.java
new file mode 100644
index 000000000..6b2c18335
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Decompressor.java
@@ -0,0 +1,25 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @deprecated Use {@link LZ4FastDecompressor} instead.
+ */
+@Deprecated
+public interface LZ4Decompressor {
+
+ int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Exception.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Exception.java
new file mode 100644
index 000000000..cb45c0aa3
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Exception.java
@@ -0,0 +1,36 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * LZ4 compression or decompression error.
+ */
+public class LZ4Exception extends RuntimeException {
+
+ private static final long serialVersionUID = 1L;
+
+ public LZ4Exception(String msg, Throwable t) {
+ super(msg, t);
+ }
+
+ public LZ4Exception(String msg) {
+ super(msg);
+ }
+
+ public LZ4Exception() {
+ super();
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Factory.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Factory.java
new file mode 100644
index 000000000..223c95841
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Factory.java
@@ -0,0 +1,222 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.util.Arrays;
+
+import net.jpountz.util.Native;
+
+/**
+ * Entry point for the LZ4 API.
+ *
+ * This class has 3 instances
+ * - a {@link #nativeInstance() native} instance which is a JNI binding to
+ * the original LZ4 C implementation.
+ * - a {@link #safeInstance() safe Java} instance which is a pure Java port
+ * of the original C library,
+ * - an {@link #unsafeInstance() unsafe Java} instance which is a Java port
+ * using the unofficial {@link sun.misc.Unsafe} API.
+ *
+ *
+ * Only the {@link #safeInstance() safe instance} is guaranteed to work on your
+ * JVM, as a consequence it is advised to use the {@link #fastestInstance()} or
+ * {@link #fastestJavaInstance()} to pull a {@link LZ4Factory} instance.
+ *
+ * All methods from this class are very costly, so you should get an instance
+ * once, and then reuse it whenever possible. This is typically done by storing
+ * a {@link LZ4Factory} instance in a static field.
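+ *
+ * For example (illustrative only):
+ * <pre>
+ * private static final LZ4Factory FACTORY = LZ4Factory.fastestInstance();
+ * LZ4Compressor compressor = FACTORY.fastCompressor();
+ * LZ4FastDecompressor decompressor = FACTORY.fastDecompressor();
+ * </pre>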
+ */
+public final class LZ4Factory {
+
+ private static LZ4Factory instance(String impl) {
+ try {
+ return new LZ4Factory(impl);
+ } catch (Exception e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ private static LZ4Factory NATIVE_INSTANCE,
+ JAVA_UNSAFE_INSTANCE,
+ JAVA_SAFE_INSTANCE;
+
+ /**
+ * Return a {@link LZ4Factory} instance that returns compressors and
+ * decompressors that are native bindings to the original C library.
+ *
+ * Please note that this instance has some traps you should be aware of:
+ * - Upon loading this instance, files will be written to the temporary
+ * directory of the system. Although these files are supposed to be deleted
+ * when the JVM exits, they might remain on systems that don't support
+ * removal of files being used such as Windows.
+ *
+ * - The instance can only be loaded once per JVM. This can be a problem
+ * if your application uses multiple class loaders (such as most servlet
+ * containers): this instance will only be available to the children of the
+ * class loader which has loaded it. As a consequence, it is advised to
+ * either not use this instance in webapps or to put this library in the lib
+ * directory of your servlet container so that it is loaded by the system
+ * class loader.
+ *
+ */
+ public static synchronized LZ4Factory nativeInstance() {
+ if (NATIVE_INSTANCE == null) {
+ NATIVE_INSTANCE = instance("JNI");
+ }
+ return NATIVE_INSTANCE;
+ }
+
+ /** Return a {@link LZ4Factory} instance that returns compressors and
+ * decompressors that are written with Java's official API. */
+ public static synchronized LZ4Factory safeInstance() {
+ if (JAVA_SAFE_INSTANCE == null) {
+ JAVA_SAFE_INSTANCE = instance("JavaSafe");
+ }
+ return JAVA_SAFE_INSTANCE;
+ }
+
+ /** Return a {@link LZ4Factory} instance that returns compressors and
+ * decompressors that may use {@link sun.misc.Unsafe} to speed up compression
+ * and decompression. */
+ public static synchronized LZ4Factory unsafeInstance() {
+ if (JAVA_UNSAFE_INSTANCE == null) {
+ JAVA_UNSAFE_INSTANCE = instance("JavaUnsafe");
+ }
+ return JAVA_UNSAFE_INSTANCE;
+ }
+
+ /**
+ * Return the fastest available {@link LZ4Factory} instance which does not
+ * rely on JNI bindings. It first tries to load the
+ * {@link #unsafeInstance() unsafe instance}, and then the
+ * {@link #safeInstance() safe Java instance} if the JVM doesn't have a
+ * working {@link sun.misc.Unsafe}.
+ */
+ public static LZ4Factory fastestJavaInstance() {
+ try {
+ return unsafeInstance();
+ } catch (Throwable t) {
+ return safeInstance();
+ }
+ }
+
+ /**
+ * Return the fastest available {@link LZ4Factory} instance. If the class
+ * loader is the system class loader and if the
+ * {@link #nativeInstance() native instance} loads successfully, then the
+ * {@link #nativeInstance() native instance} is returned, otherwise the
+ * {@link #fastestJavaInstance() fastest Java instance} is returned.
+ *
+ * Please read {@link #nativeInstance() javadocs of nativeInstance()} before
+ * using this method.
+ */
+ public static LZ4Factory fastestInstance() {
+ if (Native.isLoaded()
+ || Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) {
+ try {
+ return nativeInstance();
+ } catch (Throwable t) {
+ return fastestJavaInstance();
+ }
+ } else {
+ return fastestJavaInstance();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <T> T classInstance(String cls) throws NoSuchFieldException, SecurityException, ClassNotFoundException, IllegalArgumentException, IllegalAccessException {
+ final Class<?> c = Class.forName(cls);
+ Field f = c.getField("INSTANCE");
+ return (T) f.get(null);
+ }
+
+ private final String impl;
+ private final LZ4Compressor fastCompressor;
+ private final LZ4Compressor highCompressor;
+ private final LZ4FastDecompressor fastDecompressor;
+ private final LZ4SafeDecompressor safeDecompressor;
+
+ private LZ4Factory(String impl) throws ClassNotFoundException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
+ this.impl = impl;
+ fastCompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "Compressor");
+ highCompressor = classInstance("net.jpountz.lz4.LZ4HC" + impl + "Compressor");
+ fastDecompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "FastDecompressor");
+ safeDecompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "SafeDecompressor");
+
+ // quickly test that everything works as expected
+ final byte[] original = new byte[] {'a','b','c','d',' ',' ',' ',' ',' ',' ','a','b','c','d','e','f','g','h','i','j'};
+ for (LZ4Compressor compressor : Arrays.asList(fastCompressor, highCompressor)) {
+ final int maxCompressedLength = compressor.maxCompressedLength(original.length);
+ final byte[] compressed = new byte[maxCompressedLength];
+ final int compressedLength = compressor.compress(original, 0, original.length, compressed, 0, maxCompressedLength);
+ final byte[] restored = new byte[original.length];
+ fastDecompressor.decompress(compressed, 0, restored, 0, original.length);
+ if (!Arrays.equals(original, restored)) {
+ throw new AssertionError();
+ }
+ Arrays.fill(restored, (byte) 0);
+ final int decompressedLength = safeDecompressor.decompress(compressed, 0, compressedLength, restored, 0);
+ if (decompressedLength != original.length || !Arrays.equals(original, restored)) {
+ throw new AssertionError();
+ }
+ }
+
+ }
+
+ /** Return a blazing fast {@link LZ4Compressor}. */
+ public LZ4Compressor fastCompressor() {
+ return fastCompressor;
+ }
+
+ /** Return a {@link LZ4Compressor} which requires more memory than
+ * {@link #fastCompressor()} and is slower but compresses more efficiently. */
+ public LZ4Compressor highCompressor() {
+ return highCompressor;
+ }
+
+ /** Return a {@link LZ4FastDecompressor} instance. */
+ public LZ4FastDecompressor fastDecompressor() {
+ return fastDecompressor;
+ }
+
+ /** Return a {@link LZ4SafeDecompressor} instance. */
+ public LZ4SafeDecompressor safeDecompressor() {
+ return safeDecompressor;
+ }
+
+ /** Return a {@link LZ4UnknownSizeDecompressor} instance.
+ * @deprecated use {@link #safeDecompressor()} */
+ public LZ4UnknownSizeDecompressor unknwonSizeDecompressor() {
+ return safeDecompressor();
+ }
+
+ /** Return a {@link LZ4Decompressor} instance.
+ * @deprecated use {@link #fastDecompressor()} */
+ public LZ4Decompressor decompressor() {
+ return fastDecompressor();
+ }
+
+ /** Prints the fastest available instances. */
+ public static void main(String[] args) {
+ System.out.println("Fastest instance is " + fastestInstance());
+ System.out.println("Fastest Java instance is " + fastestJavaInstance());
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + ":" + impl;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4FastDecompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4FastDecompressor.java
new file mode 100644
index 000000000..3281e823c
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4FastDecompressor.java
@@ -0,0 +1,90 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * LZ4 decompressor that requires the size of the original input to be known.
+ * Use {@link LZ4SafeDecompressor} if you only know the size of the
+ * compressed stream.
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class LZ4FastDecompressor implements LZ4Decompressor {
+
+ /** Decompress src[srcOff:] into dest[destOff:destOff+destLen]
+ * and return the number of bytes read from src.
+ * destLen must be exactly the size of the decompressed data.
+ *
+ * @param destLen the exact size of the original input
+ * @return the number of bytes read to restore the original input
+ */
+ public abstract int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+
+ /**
+ * Same as {@link #decompress(byte[], int, byte[], int, int)} except that up
+ * to 64 KB of data immediately before srcOff in src may be referenced during decompression. This is useful for
+ * providing LZ4 with a dictionary that can be reused during decompression.
+ */
+ public abstract int decompressWithPrefix64k(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, byte[], int, int) decompress(src, 0, dest, 0, destLen)}.
+ */
+ public final int decompress(byte[] src, byte[] dest, int destLen) {
+ return decompress(src, 0, dest, 0, destLen);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], byte[], int) decompress(src, dest, dest.length)}.
+ */
+ public final int decompress(byte[] src, byte[] dest) {
+ return decompress(src, dest, dest.length);
+ }
+
+ /**
+ * Convenience method which returns src[srcOff:?]
+ * decompressed.
+ *
+ * Warning: this method has an
+ * important overhead due to the fact that it needs to allocate a buffer to
+ * decompress into.
+ * Here is how this method is implemented:
+ *
+ * final byte[] decompressed = new byte[destLen];
+ * decompress(src, srcOff, decompressed, 0, destLen);
+ * return decompressed;
+ *
+ */
+ public final byte[] decompress(byte[] src, int srcOff, int destLen) {
+ final byte[] decompressed = new byte[destLen];
+ decompress(src, srcOff, decompressed, 0, destLen);
+ return decompressed;
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int) decompress(src, 0, destLen)}.
+ */
+ public final byte[] decompress(byte[] src, int destLen) {
+ return decompress(src, 0, destLen);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4HCJNICompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4HCJNICompressor.java
new file mode 100644
index 000000000..ec09353d4
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4HCJNICompressor.java
@@ -0,0 +1,38 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.util.Utils.checkRange;
+
+/**
+ * High compression {@link LZ4Compressor}s implemented with JNI bindings to the
+ * original C implementation of LZ4.
+ */
+final class LZ4HCJNICompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4HCJNICompressor();
+
+ @Override
+ public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ checkRange(src, srcOff, srcLen);
+ checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_compressHC(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ if (result <= 0) {
+ throw new LZ4Exception();
+ }
+ return result;
+ }
+
+}
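
The HC compressor honors the same LZ4Compressor contract as the fast compressor, so swapping one for the other is a one-line change. A small comparison sketch (the repetitive input is chosen so both codecs have matches to find; output sizes will vary by implementation):

    import java.util.Arrays;

    import net.jpountz.lz4.LZ4Compressor;
    import net.jpountz.lz4.LZ4Factory;

    public class CompressorComparison {
        public static void main(String[] args) {
            byte[] data = new byte[4096];
            Arrays.fill(data, (byte) 'a'); // highly compressible input

            LZ4Factory factory = LZ4Factory.fastestInstance();
            for (LZ4Compressor compressor : new LZ4Compressor[] {
                    factory.fastCompressor(), factory.highCompressor() }) {
                byte[] dest = new byte[compressor.maxCompressedLength(data.length)];
                int len = compressor.compress(data, 0, data.length, dest, 0, dest.length);
                System.out.println(compressor + ": " + len + " bytes");
            }
        }
    }
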
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNI.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNI.java
new file mode 100644
index 000000000..1c02ff1c7
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNI.java
@@ -0,0 +1,41 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import net.jpountz.util.Native;
+
+
+/**
+ * JNI bindings to the original C implementation of LZ4.
+ */
+enum LZ4JNI {
+ ;
+
+ static {
+ Native.load();
+ init();
+ }
+
+ static native void init();
+ static native int LZ4_compress_limitedOutput(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+ static native int LZ4_compressHC(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+ static native int LZ4_decompress_fast(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+ static native int LZ4_decompress_fast_withPrefix64k(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+ static native int LZ4_decompress_safe(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+ static native int LZ4_decompress_safe_withPrefix64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+ static native int LZ4_compressBound(int len);
+
+}
+
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNICompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNICompressor.java
new file mode 100644
index 000000000..abfdb8850
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNICompressor.java
@@ -0,0 +1,37 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.util.Utils.checkRange;
+
+/**
+ * Fast {@link LZ4Compressor}s implemented with JNI bindings to the original C
+ * implementation of LZ4.
+ */
+final class LZ4JNICompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4JNICompressor();
+
+ @Override
+ public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ checkRange(src, srcOff, srcLen);
+ checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_compress_limitedOutput(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ if (result <= 0) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ return result;
+ }
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNIFastDecompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNIFastDecompressor.java
new file mode 100644
index 000000000..6aa1963fc
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNIFastDecompressor.java
@@ -0,0 +1,48 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.util.Utils.checkRange;
+
+/**
+ * {@link LZ4FastDecompressor} implemented with JNI bindings to the original C
+ * implementation of LZ4.
+ */
+final class LZ4JNIFastDecompressor extends LZ4FastDecompressor {
+
+ public static final LZ4JNIFastDecompressor INSTANCE = new LZ4JNIFastDecompressor();
+
+ @Override
+ public final int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen) {
+ checkRange(src, srcOff);
+ checkRange(dest, destOff, destLen);
+ final int result = LZ4JNI.LZ4_decompress_fast(src, srcOff, dest, destOff, destLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ }
+
+ @Override
+ public final int decompressWithPrefix64k(byte[] src, int srcOff, byte[] dest, int destOff, int destLen) {
+ checkRange(src, srcOff);
+ checkRange(dest, destOff, destLen);
+ final int result = LZ4JNI.LZ4_decompress_fast_withPrefix64k(src, srcOff, dest, destOff, destLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ }
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNISafeDecompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNISafeDecompressor.java
new file mode 100644
index 000000000..02802facb
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4JNISafeDecompressor.java
@@ -0,0 +1,48 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.util.Utils.checkRange;
+
+/**
+ * {@link LZ4SafeDecompressor} implemented with JNI bindings to the original C
+ * implementation of LZ4.
+ */
+final class LZ4JNISafeDecompressor extends LZ4SafeDecompressor {
+
+ public static final LZ4SafeDecompressor INSTANCE = new LZ4JNISafeDecompressor();
+
+ @Override
+ public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ checkRange(src, srcOff, srcLen);
+ checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_decompress_safe(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ }
+
+ @Override
+ public final int decompressWithPrefix64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ checkRange(src, srcOff, srcLen);
+ checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_decompress_safe_withPrefix64k(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ }
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4SafeDecompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4SafeDecompressor.java
new file mode 100644
index 000000000..c4ad28a54
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4SafeDecompressor.java
@@ -0,0 +1,109 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+/**
+ * LZ4 decompressor that requires the size of the compressed data to be known.
+ *
+ * Implementations of this class are usually a little slower than those of
+ * {@link LZ4FastDecompressor} but do not require the size of the original data to
+ * be known.
+ */
+public abstract class LZ4SafeDecompressor implements LZ4UnknownSizeDecompressor {
+
+ /**
+ * Uncompress src[srcOff:srcLen] into
+ * dest[destOff:destOff+maxDestLen] and returns the number of
+ * decompressed bytes written into dest.
+ *
+ * @param srcLen the exact size of the compressed stream
+ * @return the original input size
+ * @throws LZ4Exception if maxDestLen is too small
+ */
+ public abstract int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ /**
+ * Same as {@link #decompress(byte[], int, int, byte[], int, int)} except that
+ * up to 64 KB of data immediately before srcOff in src may be referenced during decompression. This is useful
+ * for providing LZ4 with a dictionary that can be reused during decompression.
+ */
+ public abstract int decompressWithPrefix64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int, byte[], int, int) decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
+ */
+ public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
+ return decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompressWithPrefix64k(byte[], int, int, byte[], int, int) decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
+ */
+ public final int decompressWithPrefix64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
+ return decompressWithPrefix64k(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int, byte[], int) decompress(src, 0, src.length, dest, 0)}
+ */
+ public final int decompress(byte[] src, byte[] dest) {
+ return decompress(src, 0, src.length, dest, 0);
+ }
+
+ /**
+ * Convenience method which returns src[srcOff:srcOff+srcLen]
+ * decompressed.
+ *
+ * Warning: this method has an
+ * important overhead due to the fact that it needs to allocate a buffer to
+ * decompress into, and then needs to resize this buffer to the actual
+ * decompressed length.
+ * Here is how this method is implemented:
+ *
+ * byte[] decompressed = new byte[maxDestLen];
+ * final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
+ * if (decompressedLength != decompressed.length) {
+ * decompressed = Arrays.copyOf(decompressed, decompressedLength);
+ * }
+ * return decompressed;
+ *
+ */
+ public final byte[] decompress(byte[] src, int srcOff, int srcLen, int maxDestLen) {
+ byte[] decompressed = new byte[maxDestLen];
+ final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
+ if (decompressedLength != decompressed.length) {
+ decompressed = Arrays.copyOf(decompressed, decompressedLength);
+ }
+ return decompressed;
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int, int) decompress(src, 0, src.length, maxDestLen)}.
+ */
+ public final byte[] decompress(byte[] src, int maxDestLen) {
+ return decompress(src, 0, src.length, maxDestLen);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
\ No newline at end of file
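
In contrast with LZ4FastDecompressor, the safe decompressor only needs the compressed length, at the price of an over-sized destination buffer. A minimal sketch (the 2x over-sizing below is an arbitrary illustration; real code should derive a bound from its framing format):

    import java.nio.charset.StandardCharsets;

    import net.jpountz.lz4.LZ4Compressor;
    import net.jpountz.lz4.LZ4Factory;
    import net.jpountz.lz4.LZ4SafeDecompressor;

    public class SafeDecompressExample {
        public static void main(String[] args) {
            LZ4Factory factory = LZ4Factory.fastestInstance();
            byte[] original = "abcd      abcdefghij".getBytes(StandardCharsets.UTF_8);

            LZ4Compressor compressor = factory.fastCompressor();
            byte[] compressed = new byte[compressor.maxCompressedLength(original.length)];
            int compressedLength = compressor.compress(original, 0, original.length,
                    compressed, 0, compressed.length);

            // Only compressedLength is needed; the destination just has to be big enough.
            LZ4SafeDecompressor decompressor = factory.safeDecompressor();
            byte[] restored = new byte[original.length * 2]; // over-sized on purpose
            int restoredLength = decompressor.decompress(compressed, 0, compressedLength,
                    restored, 0);
            System.out.println(restoredLength + " bytes restored");
        }
    }
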
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4UnknownSizeDecompressor.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4UnknownSizeDecompressor.java
new file mode 100644
index 000000000..10fde1c4d
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4UnknownSizeDecompressor.java
@@ -0,0 +1,27 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @deprecated Use {@link LZ4SafeDecompressor} instead.
+ */
+@Deprecated
+public interface LZ4UnknownSizeDecompressor {
+
+ int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff);
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Utils.java b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Utils.java
new file mode 100644
index 000000000..a46374bfe
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/LZ4Utils.java
@@ -0,0 +1,206 @@
+package net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.lz4.LZ4Constants.HASH_LOG;
+import static net.jpountz.lz4.LZ4Constants.HASH_LOG_64K;
+import static net.jpountz.lz4.LZ4Constants.HASH_LOG_HC;
+import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
+import static net.jpountz.lz4.LZ4Constants.MIN_MATCH;
+import static net.jpountz.lz4.LZ4Constants.ML_BITS;
+import static net.jpountz.lz4.LZ4Constants.ML_MASK;
+import static net.jpountz.lz4.LZ4Constants.RUN_MASK;
+import static net.jpountz.util.Utils.readInt;
+
+enum LZ4Utils {
+ ;
+
+ static final int maxCompressedLength(int length) {
+ if (length < 0) {
+ throw new IllegalArgumentException("length must be >= 0, got " + length);
+ }
+ return length + length / 255 + 16;
+ }
+
+ static int hash(int i) {
+ return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG);
+ }
+
+ static int hash64k(int i) {
+ return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_64K);
+ }
+
+ static int hashHC(int i) {
+ return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_HC);
+ }
+
+ static int readShortLittleEndian(byte[] buf, int i) {
+ return (buf[i] & 0xFF) | ((buf[i+1] & 0xFF) << 8);
+ }
+
+ static int hash(byte[] buf, int i) {
+ return hash(readInt(buf, i));
+ }
+
+ static int hash64k(byte[] buf, int i) {
+ return hash64k(readInt(buf, i));
+ }
+
+ static boolean readIntEquals(byte[] buf, int i, int j) {
+ return buf[i] == buf[j] && buf[i+1] == buf[j+1] && buf[i+2] == buf[j+2] && buf[i+3] == buf[j+3];
+ }
+
+ static void safeIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchLen) {
+ for (int i = 0; i < matchLen; ++i) {
+ dest[dOff + i] = dest[matchOff + i];
+ }
+ }
+
+ static void wildIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchCopyEnd) {
+ do {
+ copy8Bytes(dest, matchOff, dest, dOff);
+ matchOff += 8;
+ dOff += 8;
+ } while (dOff < matchCopyEnd);
+ }
+
+ static void copy8Bytes(byte[] src, int sOff, byte[] dest, int dOff) {
+ for (int i = 0; i < 8; ++i) {
+ dest[dOff + i] = src[sOff + i];
+ }
+ }
+
+ static int commonBytes(byte[] b, int o1, int o2, int limit) {
+ int count = 0;
+ while (o2 < limit && b[o1++] == b[o2++]) {
+ ++count;
+ }
+ return count;
+ }
+
+ static int commonBytesBackward(byte[] b, int o1, int o2, int l1, int l2) {
+ int count = 0;
+ while (o1 > l1 && o2 > l2 && b[--o1] == b[--o2]) {
+ ++count;
+ }
+ return count;
+ }
+
+ static void safeArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) {
+ System.arraycopy(src, sOff, dest, dOff, len);
+ }
+
+ static void wildArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) {
+ try {
+ for (int i = 0; i < len; i += 8) {
+ copy8Bytes(src, sOff + i, dest, dOff + i);
+ }
+ } catch (ArrayIndexOutOfBoundsException e) {
+ throw new LZ4Exception("Malformed input at offset " + sOff);
+ }
+ }
+
+ static int encodeSequence(byte[] src, int anchor, int matchOff, int matchRef, int matchLen, byte[] dest, int dOff, int destEnd) {
+ final int runLen = matchOff - anchor;
+ final int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ int token;
+ if (runLen >= RUN_MASK) {
+ token = (byte) (RUN_MASK << ML_BITS);
+ dOff = writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ token = runLen << ML_BITS;
+ }
+
+ // copy literals
+ wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ // encode offset
+ final int matchDec = matchOff - matchRef;
+ dest[dOff++] = (byte) matchDec;
+ dest[dOff++] = (byte) (matchDec >>> 8);
+
+ // encode match len
+ matchLen -= 4;
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ if (matchLen >= ML_MASK) {
+ token |= ML_MASK;
+ dOff = writeLen(matchLen - RUN_MASK, dest, dOff);
+ } else {
+ token |= matchLen;
+ }
+
+ dest[tokenOff] = (byte) token;
+
+ return dOff;
+ }
+
+ static int lastLiterals(byte[] src, int sOff, int srcLen, byte[] dest, int dOff, int destEnd) {
+ final int runLen = srcLen;
+
+ if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) {
+ throw new LZ4Exception();
+ }
+
+ if (runLen >= RUN_MASK) {
+ dest[dOff++] = (byte) (RUN_MASK << ML_BITS);
+ dOff = writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ dest[dOff++] = (byte) (runLen << ML_BITS);
+ }
+ // copy literals
+ System.arraycopy(src, sOff, dest, dOff, runLen);
+ dOff += runLen;
+
+ return dOff;
+ }
+
+ static int writeLen(int len, byte[] dest, int dOff) {
+ while (len >= 0xFF) {
+ dest[dOff++] = (byte) 0xFF;
+ len -= 0xFF;
+ }
+ dest[dOff++] = (byte) len;
+ return dOff;
+ }
+
+ static class Match {
+ int start, ref, len;
+
+ void fix(int correction) {
+ start += correction;
+ ref += correction;
+ len -= correction;
+ }
+
+ int end() {
+ return start + len;
+ }
+ }
+
+ static void copyTo(Match m1, Match m2) {
+ m2.len = m1.len;
+ m2.start = m1.start;
+ m2.ref = m1.ref;
+ }
+
+}
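
encodeSequence above packs each LZ4 sequence behind a one-byte token: the high nibble holds the literal run length and the low nibble holds the match length minus MIN_MATCH, with either nibble saturating at 15 and spilling into 0xFF continuation bytes via writeLen. A worked decoding sketch, assuming the standard LZ4 constants (ML_BITS = 4, MIN_MATCH = 4) defined in LZ4Constants:

    public class TokenLayout {
        public static void main(String[] args) {
            int token = 0x52; // example token
            int runLen = token >>> 4;          // high nibble: 5 literal bytes (15 means read extra length bytes)
            int matchLen = (token & 0x0F) + 4; // low nibble + MIN_MATCH: a 6-byte match
            System.out.println("literals=" + runLen + ", match=" + matchLen);
        }
    }
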
diff --git a/Tools/Cache Editor/src/net/jpountz/lz4/package.html b/Tools/Cache Editor/src/net/jpountz/lz4/package.html
new file mode 100644
index 000000000..e53410674
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/lz4/package.html
@@ -0,0 +1,55 @@
+
+
+
+
+
+
+
+LZ4 compression. The entry point of the API is the
+{@link net.jpountz.lz4.LZ4Factory} class, which gives access to
+{@link net.jpountz.lz4.LZ4Compressor compressors} and
+{@link net.jpountz.lz4.LZ4SafeDecompressor decompressors}.
+
+
+Sample usage:
+
+
+ LZ4Factory factory = LZ4Factory.fastestInstance();
+
+ byte[] data = "12345345234572".getBytes("UTF-8");
+ final int decompressedLength = data.length;
+
+ // compress data
+ LZ4Compressor compressor = factory.fastCompressor();
+ int maxCompressedLength = compressor.maxCompressedLength(decompressedLength);
+ byte[] compressed = new byte[maxCompressedLength];
+ int compressedLength = compressor.compress(data, 0, decompressedLength, compressed, 0, maxCompressedLength);
+
+ // decompress data
+ // - method 1: when the decompressed length is known
+ LZ4FastDecompressor decompressor = factory.fastDecompressor();
+ byte[] restored = new byte[decompressedLength];
+ int compressedLength2 = decompressor.decompress(compressed, 0, restored, 0, decompressedLength);
+ // compressedLength == compressedLength2
+
+ // - method 2: when the compressed length is known (a little slower)
+ // the destination buffer needs to be over-sized
+ LZ4SafeDecompressor decompressor2 = factory.safeDecompressor();
+ int decompressedLength2 = decompressor2.decompress(compressed, 0, compressedLength, restored, 0);
+ // decompressedLength == decompressedLength2
+
+
+
+
diff --git a/Tools/Cache Editor/src/net/jpountz/util/Native.java b/Tools/Cache Editor/src/net/jpountz/util/Native.java
new file mode 100644
index 000000000..34f7877bc
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/util/Native.java
@@ -0,0 +1,121 @@
+package net.jpountz.util;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/** FOR INTERNAL USE ONLY */
+public enum Native {
+ ;
+
+ private enum OS {
+ // Even on Windows, the default compiler from cpptasks (gcc) uses .so as a shared lib extension
+ WINDOWS("win32", "so"), LINUX("linux", "so"), MAC("darwin", "dylib"), SOLARIS("solaris", "so");
+ public final String name, libExtension;
+
+ private OS(String name, String libExtension) {
+ this.name = name;
+ this.libExtension = libExtension;
+ }
+ }
+
+ private static String arch() {
+ return System.getProperty("os.arch");
+ }
+
+ private static OS os() {
+ String osName = System.getProperty("os.name");
+ if (osName.contains("Linux")) {
+ return OS.LINUX;
+ } else if (osName.contains("Mac")) {
+ return OS.MAC;
+ } else if (osName.contains("Windows")) {
+ return OS.WINDOWS;
+ } else if (osName.contains("Solaris")) {
+ return OS.SOLARIS;
+ } else {
+ throw new UnsupportedOperationException("Unsupported operating system: "
+ + osName);
+ }
+ }
+
+ private static String resourceName() {
+ OS os = os();
+ return "/" + os.name + "/" + arch() + "/liblz4-java." + os.libExtension;
+ }
+
+ private static boolean loaded = false;
+
+ public static synchronized boolean isLoaded() {
+ return loaded;
+ }
+
+ public static synchronized void load() {
+ if (loaded) {
+ return;
+ }
+ String resourceName = resourceName();
+ InputStream is = Native.class.getResourceAsStream(resourceName);
+ if (is == null) {
+ throw new UnsupportedOperationException("Unsupported OS/arch, cannot find " + resourceName + ". Please try building from source.");
+ }
+ File tempLib;
+ try {
+ tempLib = File.createTempFile("liblz4-java", "." + os().libExtension);
+ // copy to tempLib
+ FileOutputStream out = new FileOutputStream(tempLib);
+ try {
+ byte[] buf = new byte[4096];
+ while (true) {
+ int read = is.read(buf);
+ if (read == -1) {
+ break;
+ }
+ out.write(buf, 0, read);
+ }
+ try {
+ out.close();
+ out = null;
+ } catch (IOException e) {
+ // ignore
+ }
+ System.load(tempLib.getAbsolutePath());
+ loaded = true;
+ } finally {
+ try {
+ if (out != null) {
+ out.close();
+ }
+ } catch (IOException e) {
+ // ignore
+ }
+ if (tempLib != null && tempLib.exists()) {
+ if (!loaded) {
+ tempLib.delete();
+ } else {
+ // try to delete on exit, does it work on Windows?
+ tempLib.deleteOnExit();
+ }
+ }
+ }
+ } catch (IOException e) {
+ throw new ExceptionInInitializerError("Cannot unpack liblz4-java");
+ }
+ }
+
+}
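
Since load() writes the shared library to a temp file and System.load() binds it process-wide, native users normally guard the JNI path and fall back to pure Java, which is what the factories' fastestInstance() methods do internally. A minimal sketch of the same pattern written out by hand:

    import net.jpountz.lz4.LZ4Factory;

    public class NativeFallback {
        public static void main(String[] args) {
            LZ4Factory factory;
            try {
                factory = LZ4Factory.nativeInstance(); // throws if the JNI library cannot load
            } catch (Throwable t) {
                factory = LZ4Factory.fastestJavaInstance(); // pure-Java fallback
            }
            System.out.println("Using " + factory);
        }
    }
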
diff --git a/Tools/Cache Editor/src/net/jpountz/util/Utils.java b/Tools/Cache Editor/src/net/jpountz/util/Utils.java
new file mode 100644
index 000000000..acc2ba007
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/util/Utils.java
@@ -0,0 +1,89 @@
+package net.jpountz.util;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteOrder;
+
+public enum Utils {
+ ;
+
+ public static final ByteOrder NATIVE_BYTE_ORDER = ByteOrder.nativeOrder();
+
+ public static void checkRange(byte[] buf, int off) {
+ if (off < 0 || off >= buf.length) {
+ throw new ArrayIndexOutOfBoundsException(off);
+ }
+ }
+
+ public static void checkRange(byte[] buf, int off, int len) {
+ checkLength(len);
+ if (len > 0) {
+ checkRange(buf, off);
+ checkRange(buf, off + len - 1);
+ }
+ }
+
+ public static void checkLength(int len) {
+ if (len < 0) {
+ throw new IllegalArgumentException("lengths must be >= 0");
+ }
+ }
+
+ public static byte readByte(byte[] buf, int i) {
+ return buf[i];
+ }
+
+ public static int readIntBE(byte[] buf, int i) {
+ return ((buf[i] & 0xFF) << 24) | ((buf[i+1] & 0xFF) << 16) | ((buf[i+2] & 0xFF) << 8) | (buf[i+3] & 0xFF);
+ }
+
+ public static int readIntLE(byte[] buf, int i) {
+ return (buf[i] & 0xFF) | ((buf[i+1] & 0xFF) << 8) | ((buf[i+2] & 0xFF) << 16) | ((buf[i+3] & 0xFF) << 24);
+ }
+
+ public static int readInt(byte[] buf, int i) {
+ if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ return readIntBE(buf, i);
+ } else {
+ return readIntLE(buf, i);
+ }
+ }
+
+ public static void writeShortLittleEndian(byte[] buf, int off, int v) {
+ buf[off++] = (byte) v;
+ buf[off++] = (byte) (v >>> 8);
+ }
+
+ public static void writeInt(int[] buf, int off, int v) {
+ buf[off] = v;
+ }
+
+ public static int readInt(int[] buf, int off) {
+ return buf[off];
+ }
+
+ public static void writeByte(byte[] dest, int tokenOff, int i) {
+ dest[tokenOff] = (byte) i;
+ }
+
+ public static void writeShort(short[] buf, int off, int v) {
+ buf[off] = (short) v;
+ }
+
+ public static int readShort(short[] buf, int off) {
+ return buf[off] & 0xFFFF;
+ }
+
+}
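
readIntLE and readIntBE differ only in which end of the word comes first, and readInt picks whichever matches the platform byte order. A worked example with arbitrary byte values:

    import net.jpountz.util.Utils;

    public class EndiannessDemo {
        public static void main(String[] args) {
            byte[] buf = {0x01, 0x02, 0x03, 0x04};
            // Little endian: least significant byte first -> 0x04030201
            System.out.printf("LE: 0x%08X%n", Utils.readIntLE(buf, 0));
            // Big endian: most significant byte first -> 0x01020304
            System.out.printf("BE: 0x%08X%n", Utils.readIntBE(buf, 0));
        }
    }
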
diff --git a/Tools/Cache Editor/src/net/jpountz/util/package.html b/Tools/Cache Editor/src/net/jpountz/util/package.html
new file mode 100644
index 000000000..4b3ceb980
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/util/package.html
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+Utility classes.
+
+
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/AbstractStreamingXXHash32Java.java b/Tools/Cache Editor/src/net/jpountz/xxhash/AbstractStreamingXXHash32Java.java
new file mode 100644
index 000000000..3dd3ed4f3
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/AbstractStreamingXXHash32Java.java
@@ -0,0 +1,42 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.xxhash.XXHashConstants.PRIME1;
+import static net.jpountz.xxhash.XXHashConstants.PRIME2;
+
+abstract class AbstractStreamingXXHash32Java extends StreamingXXHash32 {
+
+ int v1, v2, v3, v4, memSize;
+ long totalLen;
+ final byte[] memory;
+
+ AbstractStreamingXXHash32Java(int seed) {
+ super(seed);
+ memory = new byte[16];
+ reset();
+ }
+
+ @Override
+ public void reset() {
+ v1 = seed + PRIME1 + PRIME2;
+ v2 = seed + PRIME2;
+ v3 = seed + 0;
+ v4 = seed - PRIME1;
+ totalLen = 0;
+ memSize = 0;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/StreamingXXHash32.java b/Tools/Cache Editor/src/net/jpountz/xxhash/StreamingXXHash32.java
new file mode 100644
index 000000000..c59f03466
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/StreamingXXHash32.java
@@ -0,0 +1,111 @@
+package net.jpountz.xxhash;
+
+import java.util.zip.Checksum;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+/**
+ * Streaming interface for {@link XXHash32}.
+ *
+ * This API is compatible with the {@link XXHash32 block API} and the following
+ * code samples are equivalent:
+ *
+ * int hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, int seed) {
+ * return xxhashFactory.hash32().hash(buf, off, len, seed);
+ * }
+ *
+ *
+ * int hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, int seed) {
+ * StreamingXXHash32 sh32 = xxhashFactory.newStreamingHash32(seed);
+ * sh32.update(buf, off, len);
+ * return sh32.getValue();
+ * }
+ *
+ *
+ * Instances of this class are not thread-safe.
+ */
+public abstract class StreamingXXHash32 {
+
+ interface Factory {
+
+ StreamingXXHash32 newStreamingHash(int seed);
+
+ }
+
+ final int seed;
+
+ StreamingXXHash32(int seed) {
+ this.seed = seed;
+ }
+
+ /**
+ * Get the value of the checksum.
+ */
+ public abstract int getValue();
+
+ /**
+ * Update the value of the hash with buf[off:off+len].
+ */
+ public abstract void update(byte[] buf, int off, int len);
+
+ /**
+ * Reset this instance to the state it had right after instantiation. The
+ * seed remains unchanged.
+ */
+ public abstract void reset();
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(seed=" + seed + ")";
+ }
+
+ /**
+ * Return a {@link Checksum} view of this instance. Modifications to the view
+ * will modify this instance too and vice-versa.
+ */
+ public final Checksum asChecksum() {
+ return new Checksum() {
+
+ @Override
+ public long getValue() {
+ return StreamingXXHash32.this.getValue() & 0xFFFFFFFFL; // zero-extend to an unsigned 32-bit value
+ }
+
+ @Override
+ public void reset() {
+ StreamingXXHash32.this.reset();
+ }
+
+ @Override
+ public void update(int b) {
+ StreamingXXHash32.this.update(new byte[] {(byte) b}, 0, 1);
+ }
+
+ @Override
+ public void update(byte[] b, int off, int len) {
+ StreamingXXHash32.this.update(b, off, len);
+ }
+
+ @Override
+ public String toString() {
+ return StreamingXXHash32.this.toString();
+ }
+
+ };
+ }
+
+}
\ No newline at end of file
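
The asChecksum() adapter makes a streaming hash usable anywhere java.util.zip expects a Checksum. A minimal sketch with CheckedInputStream (stream contents and seed are arbitrary):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.util.zip.CheckedInputStream;

    import net.jpountz.xxhash.StreamingXXHash32;
    import net.jpountz.xxhash.XXHashFactory;

    public class ChecksumViewDemo {
        public static void main(String[] args) throws IOException {
            byte[] data = "12345345234572".getBytes("UTF-8");
            StreamingXXHash32 hash = XXHashFactory.fastestInstance().newStreamingHash32(0x9747b28c);

            // CheckedInputStream feeds every byte it reads through the Checksum view.
            CheckedInputStream in = new CheckedInputStream(
                    new ByteArrayInputStream(data), hash.asChecksum());
            byte[] buf = new byte[8192];
            while (in.read(buf) != -1) {
                // draining the stream is enough to update the hash
            }
            System.out.printf("xxhash32 = 0x%08X%n", hash.getValue());
        }
    }
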
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/StreamingXXHash32JNI.java b/Tools/Cache Editor/src/net/jpountz/xxhash/StreamingXXHash32JNI.java
new file mode 100644
index 000000000..98f417cde
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/StreamingXXHash32JNI.java
@@ -0,0 +1,71 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+final class StreamingXXHash32JNI extends StreamingXXHash32 {
+
+ static class Factory implements StreamingXXHash32.Factory {
+
+ public static final StreamingXXHash32.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash32 newStreamingHash(int seed) {
+ return new StreamingXXHash32JNI(seed);
+ }
+
+ }
+
+ private long state;
+
+ StreamingXXHash32JNI(int seed) {
+ super(seed);
+ state = XXHashJNI.XXH32_init(seed);
+ }
+
+ private void checkState() {
+ if (state == 0) {
+ throw new AssertionError("Already finalized");
+ }
+ }
+
+ @Override
+ public void reset() {
+ checkState();
+ XXHashJNI.XXH32_free(state);
+ state = XXHashJNI.XXH32_init(seed);
+ }
+
+ @Override
+ public int getValue() {
+ checkState();
+ return XXHashJNI.XXH32_intermediateDigest(state);
+ }
+
+ @Override
+ public void update(byte[] bytes, int off, int len) {
+ checkState();
+ XXHashJNI.XXH32_update(state, bytes, off, len);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ super.finalize();
+ // free memory
+ XXHashJNI.XXH32_free(state);
+ state = 0;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/XXHash32.java b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHash32.java
new file mode 100644
index 000000000..1a80023d9
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHash32.java
@@ -0,0 +1,35 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A 32-bit hash.
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class XXHash32 {
+
+ /**
+ * Compute the 32-bit hash of buf[off:off+len] using the given
+ * seed.
+ */
+ public abstract int hash(byte[] buf, int off, int len, int seed);
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/XXHash32JNI.java b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHash32JNI.java
new file mode 100644
index 000000000..bb75980b4
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHash32JNI.java
@@ -0,0 +1,29 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static net.jpountz.util.Utils.checkRange;
+
+final class XXHash32JNI extends XXHash32 {
+
+ public static final XXHash32 INSTANCE = new XXHash32JNI();
+
+ @Override
+ public int hash(byte[] buf, int off, int len, int seed) {
+ checkRange(buf, off, len);
+ return XXHashJNI.XXH32(buf, off, len, seed);
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashConstants.java b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashConstants.java
new file mode 100644
index 000000000..771a0d2c8
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashConstants.java
@@ -0,0 +1,26 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+enum XXHashConstants {
+ ;
+
+ static final int PRIME1 = -1640531535;
+ static final int PRIME2 = -2048144777;
+ static final int PRIME3 = -1028477379;
+ static final int PRIME4 = 668265263;
+ static final int PRIME5 = 374761393;
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashFactory.java b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashFactory.java
new file mode 100644
index 000000000..a7750b679
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashFactory.java
@@ -0,0 +1,190 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.util.Random;
+
+import net.jpountz.util.Native;
+
+/**
+ * Entry point to get {@link XXHash32} and {@link StreamingXXHash32} instances.
+ *
+ * This class has 3 instances:
+ * - a {@link #nativeInstance() native} instance which is a JNI binding to
+ * the original C implementation of xxhash,
+ * - a {@link #safeInstance() safe Java} instance which is a pure Java port
+ * of the original C library,
+ * - an {@link #unsafeInstance() unsafe Java} instance which is a Java port
+ * using the unofficial {@link sun.misc.Unsafe} API.
+ *
+ *
+ * Only the {@link #safeInstance() safe instance} is guaranteed to work on your
+ * JVM, as a consequence it is advised to use the {@link #fastestInstance()} or
+ * {@link #fastestJavaInstance()} to pull a {@link XXHashFactory} instance.
+ *
+ * All methods from this class are very costly, so you should get an instance
+ * once, and then reuse it whenever possible. This is typically done by storing
+ * a {@link XXHashFactory} instance in a static field.
+ */
+public final class XXHashFactory {
+
+ private static XXHashFactory instance(String impl) {
+ try {
+ return new XXHashFactory(impl);
+ } catch (Exception e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ private static XXHashFactory NATIVE_INSTANCE,
+ JAVA_UNSAFE_INSTANCE,
+ JAVA_SAFE_INSTANCE;
+
+ /** Return a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * are native bindings to the original C API.
+ *
+ * Please note that this instance has some traps you should be aware of:
+ * - Upon loading this instance, files will be written to the temporary
+ * directory of the system. Although these files are supposed to be deleted
+ * when the JVM exits, they might remain on systems that don't support
+ * removal of files being used such as Windows.
+ *
+ * - The instance can only be loaded once per JVM. This can be a problem
+ * if your application uses multiple class loaders (such as most servlet
+ * containers): this instance will only be available to the children of the
+ * class loader which has loaded it. As a consequence, it is advised to
+ * either not use this instance in webapps or to put this library in the lib
+ * directory of your servlet container so that it is loaded by the system
+ * class loader.
+ *
+ */
+ public static synchronized XXHashFactory nativeInstance() {
+ if (NATIVE_INSTANCE == null) {
+ NATIVE_INSTANCE = instance("JNI");
+ }
+ return NATIVE_INSTANCE;
+ }
+
+ /** Return a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * are written with Java's official API. */
+ public static synchronized XXHashFactory safeInstance() {
+ if (JAVA_SAFE_INSTANCE == null) {
+ JAVA_SAFE_INSTANCE = instance("JavaSafe");
+ }
+ return JAVA_SAFE_INSTANCE;
+ }
+
+ /** Return a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * may use {@link sun.misc.Unsafe} to speed up hashing. */
+ public static synchronized XXHashFactory unsafeInstance() {
+ if (JAVA_UNSAFE_INSTANCE == null) {
+ JAVA_UNSAFE_INSTANCE = instance("JavaUnsafe");
+ }
+ return JAVA_UNSAFE_INSTANCE;
+ }
+
+ /**
+ * Return the fastest available {@link XXHashFactory} instance which does not
+ * rely on JNI bindings. It first tries to load the
+ * {@link #unsafeInstance() unsafe instance}, and then the
+ * {@link #safeInstance() safe Java instance} if the JVM doesn't have a
+ * working {@link sun.misc.Unsafe}.
+ */
+ public static XXHashFactory fastestJavaInstance() {
+ try {
+ return unsafeInstance();
+ } catch (Throwable t) {
+ return safeInstance();
+ }
+ }
+
+ /**
+ * Return the fastest available {@link XXHashFactory} instance. If the class
+ * loader is the system class loader and if the
+ * {@link #nativeInstance() native instance} loads successfully, then the
+ * {@link #nativeInstance() native instance} is returned, otherwise the
+ * {@link #fastestJavaInstance() fastest Java instance} is returned.
+ *
+ * Please read {@link #nativeInstance() javadocs of nativeInstance()} before
+ * using this method.
+ */
+ public static XXHashFactory fastestInstance() {
+ if (Native.isLoaded()
+ || Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) {
+ try {
+ return nativeInstance();
+ } catch (Throwable t) {
+ return fastestJavaInstance();
+ }
+ } else {
+ return fastestJavaInstance();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <T> T classInstance(String cls) throws NoSuchFieldException, SecurityException, ClassNotFoundException, IllegalArgumentException, IllegalAccessException {
+ final Class<?> c = Class.forName(cls);
+ Field f = c.getField("INSTANCE");
+ return (T) f.get(null);
+ }
+
+ private final String impl;
+ private final XXHash32 hash32;
+ private final StreamingXXHash32.Factory streamingHash32Factory;
+
+ private XXHashFactory(String impl) throws ClassNotFoundException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
+ this.impl = impl;
+ hash32 = classInstance("net.jpountz.xxhash.XXHash32" + impl);
+ streamingHash32Factory = classInstance("net.jpountz.xxhash.StreamingXXHash32" + impl + "$Factory");
+
+ // make sure it can run
+ final byte[] bytes = new byte[100];
+ final Random random = new Random();
+ random.nextBytes(bytes);
+ final int seed = random.nextInt();
+
+ final int h1 = hash32.hash(bytes, 0, bytes.length, seed);
+ final StreamingXXHash32 streamingHash32 = newStreamingHash32(seed);
+ streamingHash32.update(bytes, 0, bytes.length);
+ final int h2 = streamingHash32.getValue();
+ if (h1 != h2) {
+ throw new AssertionError();
+ }
+ }
+
+ /** Return a {@link XXHash32} instance. */
+ public XXHash32 hash32() {
+ return hash32;
+ }
+
+ /**
+ * Return a new {@link StreamingXXHash32} instance.
+ */
+ public StreamingXXHash32 newStreamingHash32(int seed) {
+ return streamingHash32Factory.newStreamingHash(seed);
+ }
+
+ /** Prints the fastest instance. */
+ public static void main(String[] args) {
+ System.out.println("Fastest instance is " + fastestInstance());
+ System.out.println("Fastest Java instance is " + fastestJavaInstance());
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + ":" + impl;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashJNI.java b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashJNI.java
new file mode 100644
index 000000000..74c40211e
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/XXHashJNI.java
@@ -0,0 +1,35 @@
+package net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import net.jpountz.util.Native;
+
+enum XXHashJNI {
+ ;
+
+ static {
+ Native.load();
+ init();
+ }
+
+ private static native void init();
+ static native int XXH32(byte[] input, int offset, int len, int seed);
+ static native long XXH32_init(int seed);
+ static native void XXH32_update(long state, byte[] input, int offset, int len);
+ static native int XXH32_intermediateDigest(long state);
+ static native int XXH32_digest(long state);
+ static native void XXH32_free(long state);
+
+}
diff --git a/Tools/Cache Editor/src/net/jpountz/xxhash/package.html b/Tools/Cache Editor/src/net/jpountz/xxhash/package.html
new file mode 100644
index 000000000..f595d25a8
--- /dev/null
+++ b/Tools/Cache Editor/src/net/jpountz/xxhash/package.html
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+xxhash hashing. This package supports both block hashing via
+{@link net.jpountz.xxhash.XXHash32} and streaming hashing via
+{@link net.jpountz.xxhash.StreamingXXHash32}. Have a look at
+{@link net.jpountz.xxhash.XXHashFactory} to know how to get instances of these
+interfaces.
+
+Streaming hashing is a little slower but doesn't require loading the whole
+stream into memory.
+
+Sample block usage:
+
+
+ XXHashFactory factory = XXHashFactory.fastestInstance();
+
+ byte[] data = "12345345234572".getBytes("UTF-8");
+
+ XXHash32 hash32 = factory.hash32();
+ int seed = 0x9747b28c; // used to initialize the hash value, use whatever
+ // value you want, but always the same
+ int hash = hash32.hash(data, 0, data.length, seed);
+
+
+Sample streaming usage:
+
+
+ XXHashFactory factory = XXHashFactory.fastestInstance();
+
+ byte[] data = "12345345234572".getBytes("UTF-8");
+ ByteArrayInputStream in = new ByteArrayInputStream(data);
+
+ int seed = 0x9747b28c; // used to initialize the hash value, use whatever
+ // value you want, but always the same
+ StreamingXXHash32 hash32 = factory.newStreamingHash32(seed);
+ byte[] buf = new byte[8]; // for real-world usage, use a larger buffer, like 8192 bytes
+ for (;;) {
+ int read = in.read(buf);
+ if (read == -1) {
+ break;
+ }
+ hash32.update(buf, 0, read);
+ }
+ int hash = hash32.getValue();
+
+
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveEntry.java
new file mode 100644
index 000000000..0060215dd
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveEntry.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers;
+
+import java.util.Date;
+
+/**
+ * Represents an entry of an archive.
+ */
+public interface ArchiveEntry {
+
+ /**
+ * Gets the name of the entry in this archive. May refer to a file or directory or other item.
+ *
+ * @return The name of this entry in the archive.
+ */
+ public String getName();
+
+ /**
+ * Gets the uncompressed size of this entry. May be -1 (SIZE_UNKNOWN) if the size is unknown.
+ *
+ * @return the uncompressed size of this entry.
+ */
+ public long getSize();
+
+ /** Special value indicating that the size is unknown */
+ public static final long SIZE_UNKNOWN = -1;
+
+ /**
+ * Returns true if this entry refers to a directory.
+ *
+ * @return true if this entry refers to a directory.
+ */
+ public boolean isDirectory();
+
+ /**
+ * Gets the last modified date of this entry.
+ *
+ * @return the last modified date of this entry.
+ * @since 1.1
+ */
+ public Date getLastModifiedDate();
+}
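
Because the interface is this small, ad-hoc entries are easy to supply when writing archives. A minimal hypothetical implementation (the class name and fields are illustrative; commons-compress itself ships format-specific entry classes):

    import java.util.Date;

    import org.apache.commons.compress.archivers.ArchiveEntry;

    // Hypothetical value class, not part of commons-compress.
    final class SimpleEntry implements ArchiveEntry {
        private final String name;
        private final long size;
        private final boolean directory;
        private final Date lastModified = new Date();

        SimpleEntry(String name, long size, boolean directory) {
            this.name = name;
            this.size = size;
            this.directory = directory;
        }

        public String getName() { return name; }
        public long getSize() { return size; } // or SIZE_UNKNOWN (-1)
        public boolean isDirectory() { return directory; }
        public Date getLastModifiedDate() { return lastModified; }
    }
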
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveException.java
new file mode 100644
index 000000000..0c2fce3ea
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveException.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers;
+
+/**
+ * Archiver related Exception
+ */
+public class ArchiveException extends Exception {
+
+ /** Serial */
+ private static final long serialVersionUID = 2772690708123267100L;
+
+ /**
+ * Constructs a new exception with the specified detail message. The cause
+ * is not initialized.
+ *
+ * @param message
+ * the detail message
+ */
+ public ArchiveException(String message) {
+ super(message);
+ }
+
+ /**
+ * Constructs a new exception with the specified detail message and cause.
+ *
+ * @param message
+ * the detail message
+ * @param cause
+ * the cause
+ */
+ public ArchiveException(String message, Exception cause) {
+ super(message);
+ this.initCause(cause);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveInputStream.java
new file mode 100644
index 000000000..4d54c2d5b
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveInputStream.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Archive input streams MUST override the
+ * {@link #read(byte[], int, int)} - or {@link #read()} -
+ * method so that reading from the stream generates EOF for the end of
+ * data in each entry as well as at the end of the file proper.
+ *
+ * The {@link #getNextEntry()} method is used to reset the input stream
+ * ready for reading the data from the next entry.
+ *
+ * The input stream classes must also implement a method with the signature:
+ *
+ * public static boolean matches(byte[] signature, int length)
+ *
+ * which is used by the {@link ArchiveStreamFactory} to autodetect
+ * the archive type from the first few bytes of a stream.
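+ *
+ * A minimal read loop, as a sketch (assumes the ZIP back-end and a
+ * hypothetical file name):
+ *
+ * ArchiveInputStream in = new ZipArchiveInputStream(new FileInputStream("in.zip"));
+ * ArchiveEntry entry;
+ * while ((entry = in.getNextEntry()) != null) {
+ *     // read this entry's data from "in" until read() returns -1
+ * }
+ * in.close();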
+ */
+public abstract class ArchiveInputStream extends InputStream {
+
+ private final byte[] SINGLE = new byte[1];
+ private static final int BYTE_MASK = 0xFF;
+
+ /** holds the number of bytes read in this stream */
+ private long bytesRead = 0;
+
+ /**
+ * Returns the next Archive Entry in this Stream.
+ *
+ * @return the next entry,
+ * or {@code null} if there are no more entries
+ * @throws IOException if the next entry could not be read
+ */
+ public abstract ArchiveEntry getNextEntry() throws IOException;
+
+ /*
+ * Note that subclasses also implement specific get() methods which
+ * return the appropriate class without need for a cast.
+ * See SVN revision r743259
+ * @return
+ * @throws IOException
+ */
+ // public abstract XXXArchiveEntry getNextXXXEntry() throws IOException;
+
+ /**
+ * Reads a byte of data. This method will block until enough input is
+ * available.
+ *
+ * Simply calls the {@link #read(byte[], int, int)} method.
+ *
+ * MUST be overridden if the {@link #read(byte[], int, int)} method
+ * is not overridden; may be overridden otherwise.
+ *
+ * @return the byte read, or -1 if end of input is reached
+ * @throws IOException
+ * if an I/O error has occurred
+ */
+ @Override
+ public int read() throws IOException {
+ int num = read(SINGLE, 0, 1);
+ return num == -1 ? -1 : SINGLE[0] & BYTE_MASK;
+ }
+
+ /**
+ * Increments the counter of already read bytes.
+ * Doesn't increment if the EOF has been hit (read == -1)
+ *
+ * @param read the number of bytes read
+ */
+ protected void count(int read) {
+ count((long) read);
+ }
+
+ /**
+ * Increments the counter of already read bytes.
+ * Doesn't increment if the EOF has been hit (read == -1)
+ *
+ * @param read the number of bytes read
+ * @since 1.1
+ */
+ protected void count(long read) {
+ if (read != -1) {
+ bytesRead = bytesRead + read;
+ }
+ }
+
+ /**
+ * Decrements the counter of already read bytes.
+ *
+ * @param pushedBack the number of bytes pushed back.
+ * @since 1.1
+ */
+ protected void pushedBackBytes(long pushedBack) {
+ bytesRead -= pushedBack;
+ }
+
+ /**
+ * Returns the current number of bytes read from this stream.
+ * @return the number of read bytes
+ * @deprecated this method may yield wrong results for large
+ * archives, use #getBytesRead instead
+ */
+ @Deprecated
+ public int getCount() {
+ return (int) bytesRead;
+ }
+
+ /**
+ * Returns the current number of bytes read from this stream.
+ * @return the number of read bytes
+ * @since 1.1
+ */
+ public long getBytesRead() {
+ return bytesRead;
+ }
+
+ /**
+ * Whether this stream is able to read the given entry.
+ *
+ * Some archive formats support variants or details that are not supported (yet).
+ *
+ * @param archiveEntry
+ * the entry to test
+ * @return This implementation always returns true.
+ *
+ * @since 1.1
+ */
+ public boolean canReadEntryData(ArchiveEntry archiveEntry) {
+ return true;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveOutputStream.java
new file mode 100644
index 000000000..3a5084a08
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveOutputStream.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * Archive output stream implementations are expected to override the
+ * {@link #write(byte[], int, int)} method to improve performance.
+ * They should also override {@link #close()} to ensure that any necessary
+ * trailers are added.
+ *
+ * The normal sequence of calls when working with ArchiveOutputStreams is:
+ *
+ * - Create ArchiveOutputStream object,
+ * - optionally write SFX header (Zip only),
+ * - repeat as needed:
+ *   - {@link #putArchiveEntry(ArchiveEntry)} (writes entry header),
+ *   - {@link #write(byte[])} (writes entry data, as often as needed),
+ *   - {@link #closeArchiveEntry()} (closes entry),
+ * - {@link #finish()} (ends the addition of entries),
+ * - optionally write additional data, provided format supports it,
+ * - {@link #close()}.
+ *
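+ * For example, a minimal sketch of this sequence (assumes the ZIP
+ * back-end and a hypothetical output file):
+ *
+ * ArchiveOutputStream os = new ZipArchiveOutputStream(new FileOutputStream("out.zip"));
+ * os.putArchiveEntry(new ZipArchiveEntry("data.txt"));
+ * os.write("hello".getBytes());
+ * os.closeArchiveEntry();
+ * os.finish();
+ * os.close();
+ *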
+ */
+public abstract class ArchiveOutputStream extends OutputStream {
+
+ /** Temporary buffer used for the {@link #write(int)} method */
+ private final byte[] oneByte = new byte[1];
+ static final int BYTE_MASK = 0xFF;
+
+ /** holds the number of bytes written to this stream */
+ private long bytesWritten = 0;
+ // Methods specific to ArchiveOutputStream
+
+ /**
+ * Writes the headers for an archive entry to the output stream.
+ * The caller must then write the content to the stream and call
+ * {@link #closeArchiveEntry()} to complete the process.
+ *
+ * @param entry describes the entry
+ * @throws IOException
+ */
+ public abstract void putArchiveEntry(ArchiveEntry entry) throws IOException;
+
+ /**
+ * Closes the archive entry, writing any trailer information that may
+ * be required.
+ * @throws IOException
+ */
+ public abstract void closeArchiveEntry() throws IOException;
+
+ /**
+ * Finishes the addition of entries to this stream, without closing it.
+ * Additional data can be written, if the format supports it.
+ *
+ * @throws IOException if the user forgets to close the entry.
+ */
+ public abstract void finish() throws IOException;
+
+ /**
+ * Create an archive entry using the inputFile and entryName provided.
+ *
+ * @param inputFile
+ * @param entryName
+ * @return the ArchiveEntry set up with details from the file
+ *
+ * @throws IOException
+ */
+ public abstract ArchiveEntry createArchiveEntry(File inputFile, String entryName) throws IOException;
+
+ // Generic implementations of OutputStream methods that may be useful to sub-classes
+
+ /**
+ * Writes a byte to the current archive entry.
+ *
+ * This method simply calls {@code write( byte[], 0, 1 )}.
+ *
+ * MUST be overridden if the {@link #write(byte[], int, int)} method
+ * is not overridden; may be overridden otherwise.
+ *
+ * @param b The byte to be written.
+ * @throws IOException on error
+ */
+ @Override
+ public void write(int b) throws IOException {
+ oneByte[0] = (byte) (b & BYTE_MASK);
+ write(oneByte, 0, 1);
+ }
+
+ /**
+ * Increments the counter of already written bytes.
+ * Doesn't increment if EOF has been hit ({@code written == -1}).
+ *
+ * @param written the number of bytes written
+ */
+ protected void count(int written) {
+ count((long) written);
+ }
+
+ /**
+ * Increments the counter of already written bytes.
+ * Doesn't increment if EOF has been hit ({@code written == -1}).
+ *
+ * @param written the number of bytes written
+ * @since 1.1
+ */
+ protected void count(long written) {
+ if (written != -1) {
+ bytesWritten = bytesWritten + written;
+ }
+ }
+
+ /**
+ * Returns the current number of bytes written to this stream.
+ * @return the number of written bytes
+ * @deprecated this method may yield wrong results for large
+ * archives, use #getBytesWritten instead
+ */
+ @Deprecated
+ public int getCount() {
+ return (int) bytesWritten;
+ }
+
+ /**
+ * Returns the current number of bytes written to this stream.
+ * @return the number of written bytes
+ * @since 1.1
+ */
+ public long getBytesWritten() {
+ return bytesWritten;
+ }
+
+ /**
+ * Whether this stream is able to write the given entry.
+ *
+ * Some archive formats support variants or details that are
+ * not supported (yet).
+ *
+ * @param archiveEntry
+ * the entry to test
+ * @return This implementation always returns true.
+ * @since 1.1
+ */
+ public boolean canWriteEntryData(ArchiveEntry archiveEntry) {
+ return true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveStreamFactory.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveStreamFactory.java
new file mode 100644
index 000000000..bf9bd3797
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ArchiveStreamFactory.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.commons.compress.archivers.ar.ArArchiveInputStream;
+import org.apache.commons.compress.archivers.ar.ArArchiveOutputStream;
+import org.apache.commons.compress.archivers.arj.ArjArchiveInputStream;
+import org.apache.commons.compress.archivers.cpio.CpioArchiveInputStream;
+import org.apache.commons.compress.archivers.cpio.CpioArchiveOutputStream;
+import org.apache.commons.compress.archivers.dump.DumpArchiveInputStream;
+import org.apache.commons.compress.archivers.jar.JarArchiveInputStream;
+import org.apache.commons.compress.archivers.jar.JarArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * Factory to create Archive[In|Out]putStreams from names or the first bytes of
+ * the InputStream. In order to add other implementations, you should extend
+ * ArchiveStreamFactory and override the appropriate methods (and call their
+ * implementation from super, of course).
+ *
+ * Compressing a ZIP-File:
+ *
+ *
+ * final OutputStream out = new FileOutputStream(output);
+ * ArchiveOutputStream os = new ArchiveStreamFactory().createArchiveOutputStream(ArchiveStreamFactory.ZIP, out);
+ *
+ * os.putArchiveEntry(new ZipArchiveEntry("testdata/test1.xml"));
+ * IOUtils.copy(new FileInputStream(file1), os);
+ * os.closeArchiveEntry();
+ *
+ * os.putArchiveEntry(new ZipArchiveEntry("testdata/test2.xml"));
+ * IOUtils.copy(new FileInputStream(file2), os);
+ * os.closeArchiveEntry();
+ * os.close();
+ *
+ *
+ * Decompressing a ZIP-File:
+ *
+ *
+ * final InputStream is = new FileInputStream(input);
+ * ArchiveInputStream in = new ArchiveStreamFactory().createArchiveInputStream(ArchiveStreamFactory.ZIP, is);
+ * ZipArchiveEntry entry = (ZipArchiveEntry)in.getNextEntry();
+ * OutputStream out = new FileOutputStream(new File(dir, entry.getName()));
+ * IOUtils.copy(in, out);
+ * out.close();
+ * in.close();
+ *
+ *
+ * @Immutable
+ */
+public class ArchiveStreamFactory {
+
+ /**
+ * Constant used to identify the AR archive format.
+ * @since 1.1
+ */
+ public static final String AR = "ar";
+ /**
+ * Constant used to identify the ARJ archive format.
+ * @since 1.6
+ */
+ public static final String ARJ = "arj";
+ /**
+ * Constant used to identify the CPIO archive format.
+ * @since 1.1
+ */
+ public static final String CPIO = "cpio";
+ /**
+ * Constant used to identify the Unix DUMP archive format.
+ * @since 1.3
+ */
+ public static final String DUMP = "dump";
+ /**
+ * Constant used to identify the JAR archive format.
+ * @since 1.1
+ */
+ public static final String JAR = "jar";
+ /**
+ * Constant used to identify the TAR archive format.
+ * @since 1.1
+ */
+ public static final String TAR = "tar";
+ /**
+ * Constant used to identify the ZIP archive format.
+ * @since 1.1
+ */
+ public static final String ZIP = "zip";
+
+ /**
+ * Entry encoding, null for the default.
+ */
+ private String entryEncoding = null;
+
+ /**
+ * Returns the encoding to use for arj, zip, dump, cpio and tar
+ * files, or null for the default.
+ *
+ * @return entry encoding, or null
+ * @since 1.5
+ */
+ public String getEntryEncoding() {
+ return entryEncoding;
+ }
+
+ /**
+ * Sets the encoding to use for arj, zip, dump, cpio and tar files. Use null for the default.
+ *
+ * @param entryEncoding the entry encoding, null uses the default.
+ * @since 1.5
+ */
+ public void setEntryEncoding(String entryEncoding) {
+ this.entryEncoding = entryEncoding;
+ }
+
+ /**
+ * Create an archive input stream from an archiver name and an input stream.
+ *
+ * @param archiverName the archive name, i.e. "ar", "arj", "zip", "tar", "jar", "dump" or "cpio"
+ * @param in the input stream
+ * @return the archive input stream
+ * @throws ArchiveException if the archiver name is not known
+ * @throws IllegalArgumentException if the archiver name or stream is null
+ */
+ public ArchiveInputStream createArchiveInputStream(
+ final String archiverName, final InputStream in)
+ throws ArchiveException {
+
+ if (archiverName == null) {
+ throw new IllegalArgumentException("Archivername must not be null.");
+ }
+
+ if (in == null) {
+ throw new IllegalArgumentException("InputStream must not be null.");
+ }
+
+ if (AR.equalsIgnoreCase(archiverName)) {
+ return new ArArchiveInputStream(in);
+ }
+ if (ARJ.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new ArjArchiveInputStream(in, entryEncoding);
+ } else {
+ return new ArjArchiveInputStream(in);
+ }
+ }
+ if (ZIP.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new ZipArchiveInputStream(in, entryEncoding);
+ } else {
+ return new ZipArchiveInputStream(in);
+ }
+ }
+ if (TAR.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new TarArchiveInputStream(in, entryEncoding);
+ } else {
+ return new TarArchiveInputStream(in);
+ }
+ }
+ if (JAR.equalsIgnoreCase(archiverName)) {
+ return new JarArchiveInputStream(in);
+ }
+ if (CPIO.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new CpioArchiveInputStream(in, entryEncoding);
+ } else {
+ return new CpioArchiveInputStream(in);
+ }
+ }
+ if (DUMP.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new DumpArchiveInputStream(in, entryEncoding);
+ } else {
+ return new DumpArchiveInputStream(in);
+ }
+ }
+
+ throw new ArchiveException("Archiver: " + archiverName + " not found.");
+ }
+
+ /**
+ * Create an archive output stream from an archiver name and an output stream.
+ *
+ * @param archiverName the archive name, i.e. "ar", "zip", "tar", "jar" or "cpio"
+ * @param out the output stream
+ * @return the archive output stream
+ * @throws ArchiveException if the archiver name is not known
+ * @throws IllegalArgumentException if the archiver name or stream is null
+ */
+ public ArchiveOutputStream createArchiveOutputStream(
+ final String archiverName, final OutputStream out)
+ throws ArchiveException {
+ if (archiverName == null) {
+ throw new IllegalArgumentException("Archivername must not be null.");
+ }
+ if (out == null) {
+ throw new IllegalArgumentException("OutputStream must not be null.");
+ }
+
+ if (AR.equalsIgnoreCase(archiverName)) {
+ return new ArArchiveOutputStream(out);
+ }
+ if (ZIP.equalsIgnoreCase(archiverName)) {
+ ZipArchiveOutputStream zip = new ZipArchiveOutputStream(out);
+ if (entryEncoding != null) {
+ zip.setEncoding(entryEncoding);
+ }
+ return zip;
+ }
+ if (TAR.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new TarArchiveOutputStream(out, entryEncoding);
+ } else {
+ return new TarArchiveOutputStream(out);
+ }
+ }
+ if (JAR.equalsIgnoreCase(archiverName)) {
+ return new JarArchiveOutputStream(out);
+ }
+ if (CPIO.equalsIgnoreCase(archiverName)) {
+ if (entryEncoding != null) {
+ return new CpioArchiveOutputStream(out, entryEncoding);
+ } else {
+ return new CpioArchiveOutputStream(out);
+ }
+ }
+ throw new ArchiveException("Archiver: " + archiverName + " not found.");
+ }
+
+ /**
+ * Create an archive input stream from an input stream, autodetecting
+ * the archive type from the first few bytes of the stream. The InputStream
+ * must support marks, like BufferedInputStream.
+ *
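+ * A minimal sketch (the file name is hypothetical; the stream is wrapped
+ * in a BufferedInputStream so that mark/reset is available):
+ *
+ * InputStream is = new BufferedInputStream(new FileInputStream("archive.bin"));
+ * ArchiveInputStream ais = new ArchiveStreamFactory().createArchiveInputStream(is);
+ *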
+ * @param in the input stream
+ * @return the archive input stream
+ * @throws ArchiveException if the archiver name is not known
+ * @throws IllegalArgumentException if the stream is null or does not support mark
+ */
+ public ArchiveInputStream createArchiveInputStream(final InputStream in)
+ throws ArchiveException {
+ if (in == null) {
+ throw new IllegalArgumentException("Stream must not be null.");
+ }
+
+ if (!in.markSupported()) {
+ throw new IllegalArgumentException("Mark is not supported.");
+ }
+
+ final byte[] signature = new byte[12];
+ in.mark(signature.length);
+ try {
+ int signatureLength = IOUtils.readFully(in, signature);
+ in.reset();
+ if (ZipArchiveInputStream.matches(signature, signatureLength)) {
+ if (entryEncoding != null) {
+ return new ZipArchiveInputStream(in, entryEncoding);
+ } else {
+ return new ZipArchiveInputStream(in);
+ }
+ } else if (JarArchiveInputStream.matches(signature, signatureLength)) {
+ return new JarArchiveInputStream(in);
+ } else if (ArArchiveInputStream.matches(signature, signatureLength)) {
+ return new ArArchiveInputStream(in);
+ } else if (CpioArchiveInputStream.matches(signature, signatureLength)) {
+ return new CpioArchiveInputStream(in);
+ } else if (ArjArchiveInputStream.matches(signature, signatureLength)) {
+ return new ArjArchiveInputStream(in);
+ }
+
+ // Dump needs a bigger buffer to check the signature;
+ final byte[] dumpsig = new byte[32];
+ in.mark(dumpsig.length);
+ signatureLength = IOUtils.readFully(in, dumpsig);
+ in.reset();
+ if (DumpArchiveInputStream.matches(dumpsig, signatureLength)) {
+ return new DumpArchiveInputStream(in);
+ }
+
+ // Tar needs an even bigger buffer to check the signature; read the first block
+ final byte[] tarheader = new byte[512];
+ in.mark(tarheader.length);
+ signatureLength = IOUtils.readFully(in, tarheader);
+ in.reset();
+ if (TarArchiveInputStream.matches(tarheader, signatureLength)) {
+ if (entryEncoding != null) {
+ return new TarArchiveInputStream(in, entryEncoding);
+ } else {
+ return new TarArchiveInputStream(in);
+ }
+ }
+ // COMPRESS-117 - improve auto-recognition
+ if (signatureLength >= 512) {
+ TarArchiveInputStream tais = null;
+ try {
+ tais = new TarArchiveInputStream(new ByteArrayInputStream(tarheader));
+ // COMPRESS-191 - verify the header checksum
+ if (tais.getNextTarEntry().isCheckSumOK()) {
+ return new TarArchiveInputStream(in);
+ }
+ } catch (Exception e) { // NOPMD
+ // can generate IllegalArgumentException as well
+ // as IOException
+ // autodetection, simply not a TAR
+ // ignored
+ } finally {
+ IOUtils.closeQuietly(tais);
+ }
+ }
+ } catch (IOException e) {
+ throw new ArchiveException("Could not use reset and mark operations.", e);
+ }
+
+ throw new ArchiveException("No Archiver found for the stream signature");
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/Lister.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/Lister.java
new file mode 100644
index 000000000..f2eaeb966
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/Lister.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
+
+/**
+ * Simple command line application that lists the contents of an archive.
+ *
+ * The name of the archive must be given as a command line argument.
+ * The optional second argument defines the archive type, in case the format is not recognised.
+ *
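+ * Example invocation (the archive name is hypothetical):
+ *
+ * java org.apache.commons.compress.archivers.Lister some-archive.tar tar
+ *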
+ * @since 1.1
+ */
+public final class Lister {
+ private static final ArchiveStreamFactory factory = new ArchiveStreamFactory();
+
+ public static void main(String[] args) throws Exception {
+ if (args.length == 0) {
+ usage();
+ return;
+ }
+ System.out.println("Analysing "+args[0]);
+ File f = new File(args[0]);
+ if (!f.isFile()) {
+ System.err.println(f + " doesn't exist or is a directory");
+ return;
+ }
+ InputStream fis = new BufferedInputStream(new FileInputStream(f));
+ ArchiveInputStream ais;
+ if (args.length > 1) {
+ ais = factory.createArchiveInputStream(args[1], fis);
+ } else {
+ ais = factory.createArchiveInputStream(fis);
+ }
+ System.out.println("Created "+ais.toString());
+ ArchiveEntry ae;
+ while((ae=ais.getNextEntry()) != null){
+ System.out.println(ae.getName());
+ }
+ ais.close();
+ fis.close();
+ }
+
+ private static void usage() {
+ System.out.println("Parameters: archive-name [archive-type]");
+ }
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveEntry.java
new file mode 100644
index 000000000..e32749cd6
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveEntry.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.ar;
+
+import java.io.File;
+import java.util.Date;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+/**
+ * Represents an archive entry in the "ar" format.
+ *
+ * Each AR archive starts with "!<arch>" followed by an LF. After these 8 bytes
+ * the archive entries are listed. The format of an entry header is as follows:
+ *
+ *
+ * START BYTE END BYTE NAME FORMAT LENGTH
+ * 0 15 File name ASCII 16
+ * 16 27 Modification timestamp Decimal 12
+ * 28 33 Owner ID Decimal 6
+ * 34 39 Group ID Decimal 6
+ * 40 47 File mode Octal 8
+ * 48 57 File size (bytes) Decimal 10
+ * 58 59 File magic \140\012 2
+ *
+ *
+ * This specifies that an ar archive entry header contains 60 bytes.
+ *
+ * Due to the limitation of the file name length to 16 bytes, GNU and
+ * BSD have their own variants of this format. Currently Commons
+ * Compress can read but not write the GNU variant. It fully supports
+ * the BSD variant.
+ *
+ * @see ar man page
+ *
+ * @Immutable
+ */
+public class ArArchiveEntry implements ArchiveEntry {
+
+ /** The magic header that starts every AR archive */
+ public static final String HEADER = "!<arch>\n";
+
+ /** The trailer for each entry */
+ public static final String TRAILER = "`\012";
+
+ /**
+ * SVR4/GNU adds a trailing / to names; BSD does not.
+ * They also vary in how names longer than 16 characters are represented.
+ * (Not yet fully supported by this implementation)
+ */
+ private final String name;
+ private final int userId;
+ private final int groupId;
+ private final int mode;
+ private static final int DEFAULT_MODE = 33188; // = (octal) 0100644
+ private final long lastModified;
+ private final long length;
+
+ /**
+ * Create a new instance using a couple of default values.
+ *
+ * Sets userId and groupId to 0, the octal file mode to 644 and
+ * the last modified time to the current time.
+ *
+ * @param name name of the entry
+ * @param length length of the entry in bytes
+ */
+ public ArArchiveEntry(String name, long length) {
+ this(name, length, 0, 0, DEFAULT_MODE,
+ System.currentTimeMillis() / 1000);
+ }
+
+ /**
+ * Create a new instance.
+ *
+ * @param name name of the entry
+ * @param length length of the entry in bytes
+ * @param userId numeric user id
+ * @param groupId numeric group id
+ * @param mode file mode
+ * @param lastModified last modified time in seconds since the epoch
+ */
+ public ArArchiveEntry(String name, long length, int userId, int groupId,
+ int mode, long lastModified) {
+ this.name = name;
+ this.length = length;
+ this.userId = userId;
+ this.groupId = groupId;
+ this.mode = mode;
+ this.lastModified = lastModified;
+ }
+
+ /**
+ * Create a new instance using the attributes of the given file.
+ */
+ public ArArchiveEntry(File inputFile, String entryName) {
+ // TODO sort out mode
+ this(entryName, inputFile.isFile() ? inputFile.length() : 0,
+ 0, 0, DEFAULT_MODE, inputFile.lastModified() / 1000);
+ }
+
+ public long getSize() {
+ return this.getLength();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public int getUserId() {
+ return userId;
+ }
+
+ public int getGroupId() {
+ return groupId;
+ }
+
+ public int getMode() {
+ return mode;
+ }
+
+ /**
+ * Last modified time in seconds since the epoch.
+ */
+ public long getLastModified() {
+ return lastModified;
+ }
+
+ public Date getLastModifiedDate() {
+ return new Date(1000 * getLastModified());
+ }
+
+ public long getLength() {
+ return length;
+ }
+
+ public boolean isDirectory() {
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + (name == null ? 0 : name.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ ArArchiveEntry other = (ArArchiveEntry) obj;
+ if (name == null) {
+ if (other.name != null) {
+ return false;
+ }
+ } else if (!name.equals(other.name)) {
+ return false;
+ }
+ return true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveInputStream.java
new file mode 100644
index 000000000..e65f27bea
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveInputStream.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.ar;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.utils.ArchiveUtils;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * Implements the "ar" archive format as an input stream.
+ *
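+ * A minimal read sketch (the file name is hypothetical):
+ *
+ * ArArchiveInputStream in = new ArArchiveInputStream(new FileInputStream("lib.a"));
+ * ArArchiveEntry entry;
+ * while ((entry = in.getNextArEntry()) != null) {
+ *     // consume entry.getLength() bytes of entry data from "in"
+ * }
+ * in.close();
+ *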
+ * @NotThreadSafe
+ *
+ */
+public class ArArchiveInputStream extends ArchiveInputStream {
+
+ private final InputStream input;
+ private long offset = 0;
+ private boolean closed;
+
+ /*
+ * If getNextEntry has been called, the entry metadata is stored in
+ * currentEntry.
+ */
+ private ArArchiveEntry currentEntry = null;
+
+ // Storage area for extra long names (GNU ar)
+ private byte[] namebuffer = null;
+
+ /*
+ * The offset where the current entry started. -1 if no entry has been
+ * called
+ */
+ private long entryOffset = -1;
+
+ // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
+ private final byte[] NAME_BUF = new byte[16];
+ private final byte[] LAST_MODIFIED_BUF = new byte[12];
+ private final byte[] ID_BUF = new byte[6];
+ private final byte[] FILE_MODE_BUF = new byte[8];
+ private final byte[] LENGTH_BUF = new byte[10];
+
+ /**
+ * Constructs an Ar input stream with the referenced stream
+ *
+ * @param pInput
+ * the ar input stream
+ */
+ public ArArchiveInputStream(final InputStream pInput) {
+ input = pInput;
+ closed = false;
+ }
+
+ /**
+ * Returns the next AR entry in this stream.
+ *
+ * @return the next AR entry.
+ * @throws IOException
+ * if the entry could not be read
+ */
+ public ArArchiveEntry getNextArEntry() throws IOException {
+ if (currentEntry != null) {
+ final long entryEnd = entryOffset + currentEntry.getLength();
+ IOUtils.skip(this, entryEnd - offset);
+ currentEntry = null;
+ }
+
+ if (offset == 0) {
+ final byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
+ final byte[] realized = new byte[expected.length];
+ final int read = IOUtils.readFully(this, realized);
+ if (read != expected.length) {
+ throw new IOException("failed to read header. Occured at byte: " + getBytesRead());
+ }
+ for (int i = 0; i < expected.length; i++) {
+ if (expected[i] != realized[i]) {
+ throw new IOException("invalid header " + ArchiveUtils.toAsciiString(realized));
+ }
+ }
+ }
+
+ if (offset % 2 != 0 && read() < 0) {
+ // hit eof
+ return null;
+ }
+
+ if (input.available() == 0) {
+ return null;
+ }
+
+ IOUtils.readFully(this, NAME_BUF);
+ IOUtils.readFully(this, LAST_MODIFIED_BUF);
+ IOUtils.readFully(this, ID_BUF);
+ int userId = asInt(ID_BUF, true);
+ IOUtils.readFully(this, ID_BUF);
+ IOUtils.readFully(this, FILE_MODE_BUF);
+ IOUtils.readFully(this, LENGTH_BUF);
+
+ {
+ final byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.TRAILER);
+ final byte[] realized = new byte[expected.length];
+ final int read = IOUtils.readFully(this, realized);
+ if (read != expected.length) {
+ throw new IOException("failed to read entry trailer. Occured at byte: " + getBytesRead());
+ }
+ for (int i = 0; i < expected.length; i++) {
+ if (expected[i] != realized[i]) {
+ throw new IOException("invalid entry trailer. not read the content? Occured at byte: " + getBytesRead());
+ }
+ }
+ }
+
+ entryOffset = offset;
+
+ // GNU ar uses a '/' to mark the end of the filename; this allows
+ // for the use of spaces without the use of an extended filename.
+
+ // entry name is stored as ASCII string
+ String temp = ArchiveUtils.toAsciiString(NAME_BUF).trim();
+ if (isGNUStringTable(temp)) { // GNU extended filenames entry
+ currentEntry = readGNUStringTable(LENGTH_BUF);
+ return getNextArEntry();
+ }
+
+ long len = asLong(LENGTH_BUF);
+ if (temp.endsWith("/")) { // GNU terminator
+ temp = temp.substring(0, temp.length() - 1);
+ } else if (isGNULongName(temp)) {
+ int off = Integer.parseInt(temp.substring(1));// get the offset
+ temp = getExtendedName(off); // convert to the long name
+ } else if (isBSDLongName(temp)) {
+ temp = getBSDLongName(temp);
+ // entry length contained the length of the file name in
+ // addition to the real length of the entry.
+ // assume file name was ASCII, there is no "standard" otherwise
+ int nameLen = temp.length();
+ len -= nameLen;
+ entryOffset += nameLen;
+ }
+
+ currentEntry = new ArArchiveEntry(temp, len, userId,
+ asInt(ID_BUF, true),
+ asInt(FILE_MODE_BUF, 8),
+ asLong(LAST_MODIFIED_BUF));
+ return currentEntry;
+ }
+
+ /**
+ * Get an extended name from the GNU extended name buffer.
+ *
+ * @param offset pointer to entry within the buffer
+ * @return the extended file name; without trailing "/" if present.
+ * @throws IOException if name not found or buffer not set up
+ */
+ private String getExtendedName(int offset) throws IOException{
+ if (namebuffer == null) {
+ throw new IOException("Cannot process GNU long filename as no // record was found");
+ }
+ for(int i=offset; i < namebuffer.length; i++){
+ if (namebuffer[i]=='\012'){
+ if (namebuffer[i-1]=='/') {
+ i--; // drop trailing /
+ }
+ return ArchiveUtils.toAsciiString(namebuffer, offset, i-offset);
+ }
+ }
+ throw new IOException("Failed to read entry: "+offset);
+ }
+ private long asLong(byte[] input) {
+ return Long.parseLong(ArchiveUtils.toAsciiString(input).trim());
+ }
+
+ private int asInt(byte[] input) {
+ return asInt(input, 10, false);
+ }
+
+ private int asInt(byte[] input, boolean treatBlankAsZero) {
+ return asInt(input, 10, treatBlankAsZero);
+ }
+
+ private int asInt(byte[] input, int base) {
+ return asInt(input, base, false);
+ }
+
+ private int asInt(byte[] input, int base, boolean treatBlankAsZero) {
+ String string = ArchiveUtils.toAsciiString(input).trim();
+ if (string.length() == 0 && treatBlankAsZero) {
+ return 0;
+ }
+ return Integer.parseInt(string, base);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.commons.compress.archivers.ArchiveInputStream#getNextEntry()
+ */
+ @Override
+ public ArchiveEntry getNextEntry() throws IOException {
+ return getNextArEntry();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.io.InputStream#close()
+ */
+ @Override
+ public void close() throws IOException {
+ if (!closed) {
+ closed = true;
+ input.close();
+ }
+ currentEntry = null;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.io.InputStream#read(byte[], int, int)
+ */
+ @Override
+ public int read(byte[] b, final int off, final int len) throws IOException {
+ int toRead = len;
+ if (currentEntry != null) {
+ final long entryEnd = entryOffset + currentEntry.getLength();
+ if (len > 0 && entryEnd > offset) {
+ toRead = (int) Math.min(len, entryEnd - offset);
+ } else {
+ return -1;
+ }
+ }
+ final int ret = this.input.read(b, off, toRead);
+ count(ret);
+ offset += ret > 0 ? ret : 0;
+ return ret;
+ }
+
+ /**
+ * Checks if the signature matches ASCII "!<arch>" followed by a single LF
+ * control character
+ *
+ * @param signature
+ * the bytes to check
+ * @param length
+ * the number of bytes to check
+ * @return true, if this stream is an Ar archive stream, false otherwise
+ */
+ public static boolean matches(byte[] signature, int length) {
+ // 3c21 7261 6863 0a3e
+
+ if (length < 8) {
+ return false;
+ }
+ if (signature[0] != 0x21) {
+ return false;
+ }
+ if (signature[1] != 0x3c) {
+ return false;
+ }
+ if (signature[2] != 0x61) {
+ return false;
+ }
+ if (signature[3] != 0x72) {
+ return false;
+ }
+ if (signature[4] != 0x63) {
+ return false;
+ }
+ if (signature[5] != 0x68) {
+ return false;
+ }
+ if (signature[6] != 0x3e) {
+ return false;
+ }
+ if (signature[7] != 0x0a) {
+ return false;
+ }
+
+ return true;
+ }
+
+ static final String BSD_LONGNAME_PREFIX = "#1/";
+ private static final int BSD_LONGNAME_PREFIX_LEN =
+ BSD_LONGNAME_PREFIX.length();
+ private static final String BSD_LONGNAME_PATTERN =
+ "^" + BSD_LONGNAME_PREFIX + "\\d+";
+
+ /**
+ * Does the name look like it is a long name (or a name containing
+ * spaces) as encoded by BSD ar?
+ *
+ * From the FreeBSD ar(5) man page:
+ *
+ * BSD In the BSD variant, names that are shorter than 16
+ * characters and without embedded spaces are stored
+ * directly in this field. If a name has an embedded
+ * space, or if it is longer than 16 characters, then
+ * the string "#1/" followed by the decimal represen-
+ * tation of the length of the file name is placed in
+ * this field. The actual file name is stored immedi-
+ * ately after the archive header. The content of the
+ * archive member follows the file name. The ar_size
+ * field of the header (see below) will then hold the
+ * sum of the size of the file name and the size of
+ * the member.
+ *
+ *
+ * @since 1.3
+ */
+ private static boolean isBSDLongName(String name) {
+ return name != null && name.matches(BSD_LONGNAME_PATTERN);
+ }
+
+ /**
+ * Reads the real name from the current stream assuming the very
+ * first bytes to be read are the real file name.
+ *
+ * @see #isBSDLongName
+ *
+ * @since 1.3
+ */
+ private String getBSDLongName(String bsdLongName) throws IOException {
+ int nameLen =
+ Integer.parseInt(bsdLongName.substring(BSD_LONGNAME_PREFIX_LEN));
+ byte[] name = new byte[nameLen];
+ int read = IOUtils.readFully(input, name);
+ count(read);
+ if (read != nameLen) {
+ throw new EOFException();
+ }
+ return ArchiveUtils.toAsciiString(name);
+ }
+
+ private static final String GNU_STRING_TABLE_NAME = "//";
+
+ /**
+ * Is this the name of the "Archive String Table" as used by
+ * SVR4/GNU to store long file names?
+ *
+ * GNU ar stores multiple extended filenames in the data section
+ * of a file with the name "//", this record is referred to by
+ * future headers.
+ *
+ * A header references an extended filename by storing a "/"
+ * followed by a decimal offset to the start of the filename in
+ * the extended filename data section.
+ *
+ * The format of the "//" file itself is simply a list of the
+ * long filenames, each separated by one or more LF
+ * characters. Note that the decimal offsets are number of
+ * characters, not line or string number within the "//" file.
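+ *
+ * Illustrative "//" contents (names are hypothetical):
+ *
+ * a_very_long_object_name.o/
+ * another_long_name.o/
+ *
+ * A later header would reference the first name as "/0" and the
+ * second as "/27" (the byte offset of its first character).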
+ */
+ private static boolean isGNUStringTable(String name) {
+ return GNU_STRING_TABLE_NAME.equals(name);
+ }
+
+ /**
+ * Reads the GNU archive String Table.
+ *
+ * @see #isGNUStringTable
+ */
+ private ArArchiveEntry readGNUStringTable(byte[] length) throws IOException {
+ int bufflen = asInt(length); // Assume length will fit in an int
+ namebuffer = new byte[bufflen];
+ int read = read(namebuffer, 0, bufflen);
+ if (read != bufflen){
+ throw new IOException("Failed to read complete // record: expected="
+ + bufflen + " read=" + read);
+ }
+ return new ArArchiveEntry(GNU_STRING_TABLE_NAME, bufflen);
+ }
+
+ private static final String GNU_LONGNAME_PATTERN = "^/\\d+";
+
+ /**
+ * Does the name look like it is a long name (or a name containing
+ * spaces) as encoded by SVR4/GNU ar?
+ *
+ * @see #isGNUStringTable
+ */
+ private boolean isGNULongName(String name) {
+ return name != null && name.matches(GNU_LONGNAME_PATTERN);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveOutputStream.java
new file mode 100644
index 000000000..17fe61f77
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/ArArchiveOutputStream.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.ar;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.utils.ArchiveUtils;
+
+/**
+ * Implements the "ar" archive format as an output stream.
+ *
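+ * A minimal write sketch (file and entry names are hypothetical):
+ *
+ * ArArchiveOutputStream os = new ArArchiveOutputStream(new FileOutputStream("out.a"));
+ * byte[] data = "hello".getBytes("ascii");
+ * os.putArchiveEntry(new ArArchiveEntry("hello.txt", data.length));
+ * os.write(data);
+ * os.closeArchiveEntry();
+ * os.finish();
+ * os.close();
+ *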
+ * @NotThreadSafe
+ */
+public class ArArchiveOutputStream extends ArchiveOutputStream {
+ /** Fail if a long file name is required in the archive. */
+ public static final int LONGFILE_ERROR = 0;
+
+ /** BSD ar extensions are used to store long file names in the archive. */
+ public static final int LONGFILE_BSD = 1;
+
+ private final OutputStream out;
+ private long entryOffset = 0;
+ private ArArchiveEntry prevEntry;
+ private boolean haveUnclosedEntry = false;
+ private int longFileMode = LONGFILE_ERROR;
+
+ /** indicates if this archive is finished */
+ private boolean finished = false;
+
+ public ArArchiveOutputStream( final OutputStream pOut ) {
+ this.out = pOut;
+ }
+
+ /**
+ * Set the long file mode.
+ * This can be LONGFILE_ERROR(0) or LONGFILE_BSD(1).
+ * This specifies the treatment of long file names (16 characters or longer).
+ * Default is LONGFILE_ERROR.
+ * @param longFileMode the mode to use
+ * @since 1.3
+ */
+ public void setLongFileMode(int longFileMode) {
+ this.longFileMode = longFileMode;
+ }
+
+ private long writeArchiveHeader() throws IOException {
+ byte [] header = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
+ out.write(header);
+ return header.length;
+ }
+
+ @Override
+ public void closeArchiveEntry() throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ if (prevEntry == null || !haveUnclosedEntry){
+ throw new IOException("No current entry to close");
+ }
+ if (entryOffset % 2 != 0) {
+ out.write('\n'); // Pad byte
+ }
+ haveUnclosedEntry = false;
+ }
+
+ @Override
+ public void putArchiveEntry( final ArchiveEntry pEntry ) throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+
+ ArArchiveEntry pArEntry = (ArArchiveEntry)pEntry;
+ if (prevEntry == null) {
+ writeArchiveHeader();
+ } else {
+ if (prevEntry.getLength() != entryOffset) {
+ throw new IOException("length does not match entry (" + prevEntry.getLength() + " != " + entryOffset);
+ }
+
+ if (haveUnclosedEntry) {
+ closeArchiveEntry();
+ }
+ }
+
+ prevEntry = pArEntry;
+
+ writeEntryHeader(pArEntry);
+
+ entryOffset = 0;
+ haveUnclosedEntry = true;
+ }
+
+ private long fill( final long pOffset, final long pNewOffset, final char pFill ) throws IOException {
+ final long diff = pNewOffset - pOffset;
+
+ if (diff > 0) {
+ for (int i = 0; i < diff; i++) {
+ write(pFill);
+ }
+ }
+
+ return pNewOffset;
+ }
+
+ private long write( final String data ) throws IOException {
+ final byte[] bytes = data.getBytes("ascii");
+ write(bytes);
+ return bytes.length;
+ }
+
+ private long writeEntryHeader( final ArArchiveEntry pEntry ) throws IOException {
+
+ long offset = 0;
+ boolean mustAppendName = false;
+
+ final String n = pEntry.getName();
+ if (LONGFILE_ERROR == longFileMode && n.length() > 16) {
+ throw new IOException("filename too long, > 16 chars: "+n);
+ }
+ if (LONGFILE_BSD == longFileMode &&
+ (n.length() > 16 || n.indexOf(" ") > -1)) {
+ mustAppendName = true;
+ offset += write(ArArchiveInputStream.BSD_LONGNAME_PREFIX
+ + String.valueOf(n.length()));
+ } else {
+ offset += write(n);
+ }
+
+ offset = fill(offset, 16, ' ');
+ final String m = "" + pEntry.getLastModified();
+ if (m.length() > 12) {
+ throw new IOException("modified too long");
+ }
+ offset += write(m);
+
+ offset = fill(offset, 28, ' ');
+ final String u = "" + pEntry.getUserId();
+ if (u.length() > 6) {
+ throw new IOException("userid too long");
+ }
+ offset += write(u);
+
+ offset = fill(offset, 34, ' ');
+ final String g = "" + pEntry.getGroupId();
+ if (g.length() > 6) {
+ throw new IOException("groupid too long");
+ }
+ offset += write(g);
+
+ offset = fill(offset, 40, ' ');
+ final String fm = "" + Integer.toString(pEntry.getMode(), 8);
+ if (fm.length() > 8) {
+ throw new IOException("filemode too long");
+ }
+ offset += write(fm);
+
+ offset = fill(offset, 48, ' ');
+ final String s =
+ String.valueOf(pEntry.getLength()
+ + (mustAppendName ? n.length() : 0));
+ if (s.length() > 10) {
+ throw new IOException("size too long");
+ }
+ offset += write(s);
+
+ offset = fill(offset, 58, ' ');
+
+ offset += write(ArArchiveEntry.TRAILER);
+
+ if (mustAppendName) {
+ offset += write(n);
+ }
+
+ return offset;
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ out.write(b, off, len);
+ count(len);
+ entryOffset += len;
+ }
+
+ /**
+ * Calls finish if necessary, and then closes the OutputStream
+ */
+ @Override
+ public void close() throws IOException {
+ if(!finished) {
+ finish();
+ }
+ out.close();
+ prevEntry = null;
+ }
+
+ @Override
+ public ArchiveEntry createArchiveEntry(File inputFile, String entryName)
+ throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ return new ArArchiveEntry(inputFile, entryName);
+ }
+
+ @Override
+ public void finish() throws IOException {
+ if(haveUnclosedEntry) {
+ throw new IOException("This archive contains unclosed entries.");
+ } else if(finished) {
+ throw new IOException("This archive has already been finished");
+ }
+ finished = true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/package.html
new file mode 100644
index 000000000..9c80f96e1
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/ar/package.html
@@ -0,0 +1,24 @@
+<html>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+ <body>
+ Provides stream classes for reading and writing archives using
+ the AR format.
+ </body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/ArjArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/ArjArchiveEntry.java
new file mode 100644
index 000000000..80b078aa5
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/ArjArchiveEntry.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.arj;
+
+import java.io.File;
+import java.util.Date;
+import java.util.regex.Matcher;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipUtil;
+
+/**
+ * An entry in an ARJ archive.
+ *
+ * @NotThreadSafe
+ * @since 1.6
+ */
+public class ArjArchiveEntry implements ArchiveEntry {
+ private final LocalFileHeader localFileHeader;
+
+ public ArjArchiveEntry() {
+ localFileHeader = new LocalFileHeader();
+ }
+
+ ArjArchiveEntry(final LocalFileHeader localFileHeader) {
+ this.localFileHeader = localFileHeader;
+ }
+
+ /**
+ * Get this entry's name.
+ *
+ * @return This entry's name.
+ */
+ public String getName() {
+ if ((localFileHeader.arjFlags & LocalFileHeader.Flags.PATHSYM) != 0) {
+ return localFileHeader.name.replaceAll("/",
+ Matcher.quoteReplacement(File.separator));
+ } else {
+ return localFileHeader.name;
+ }
+ }
+
+ /**
+ * Get this entry's file size.
+ *
+ * @return This entry's file size.
+ */
+ public long getSize() {
+ return localFileHeader.originalSize;
+ }
+
+ /** True if the entry refers to a directory */
+ public boolean isDirectory() {
+ return localFileHeader.fileType == LocalFileHeader.FileTypes.DIRECTORY;
+ }
+
+ /**
+ * The last modified date of the entry.
+ *
+ * Note the interpretation of time is different depending on
+ * the HostOS that has created the archive. While an OS that is
+ * {@link #isHostOsUnix considered to be Unix} stores time in a
+ * timezone independent manner, other platforms only use the local
+ * time. I.e. if an archive has been created at midnight UTC on a
+ * machine in timezone UTC this method will return midnight
+ * regardless of timezone if the archive has been created on a
+ * non-Unix system and a time taking the current timezone into
+ * account if the archive has been created on Unix.
+ */
+ public Date getLastModifiedDate() {
+ long ts = isHostOsUnix() ? localFileHeader.dateTimeModified * 1000l
+ : ZipUtil.dosToJavaTime(0xFFFFFFFFL & localFileHeader.dateTimeModified);
+ return new Date(ts);
+ }
+
+ /**
+ * File mode of this entry.
+ *
+ * The format depends on the host OS that created the entry.
+ */
+ public int getMode() {
+ return localFileHeader.fileAccessMode;
+ }
+
+ /**
+ * File mode of this entry as Unix stat value.
+ *
+ * Will only be non-zero if the host OS was UNIX.
+ */
+ public int getUnixMode() {
+ return isHostOsUnix() ? getMode() : 0;
+ }
+
+ /**
+ * The operating system the archive has been created on.
+ * @see HostOs
+ */
+ public int getHostOs() {
+ return localFileHeader.hostOS;
+ }
+
+ /**
+ * Is the operating system the archive has been created on one
+ * that is considered a UNIX OS by arj?
+ */
+ public boolean isHostOsUnix() {
+ return getHostOs() == HostOs.UNIX || getHostOs() == HostOs.NEXT;
+ }
+
+ /**
+ * The known values for HostOs.
+ */
+ public static class HostOs {
+ public static final int DOS = 0;
+ public static final int PRIMOS = 1;
+ public static final int UNIX = 2;
+ public static final int AMIGA = 3;
+ public static final int MAC_OS = 4;
+ public static final int OS_2 = 5;
+ public static final int APPLE_GS = 6;
+ public static final int ATARI_ST = 7;
+ public static final int NEXT = 8;
+ public static final int VAX_VMS = 9;
+ public static final int WIN95 = 10;
+ public static final int WIN32 = 11;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/ArjArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/ArjArchiveInputStream.java
new file mode 100644
index 000000000..f99aa0da4
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/ArjArchiveInputStream.java
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.arj;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.zip.CRC32;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveException;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.utils.BoundedInputStream;
+import org.apache.commons.compress.utils.CRC32VerifyingInputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * Implements the "arj" archive format as an InputStream.
+ *
+ * Reference
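+ *
+ * A minimal read sketch (the file name is hypothetical):
+ *
+ * ArjArchiveInputStream in = new ArjArchiveInputStream(new FileInputStream("archive.arj"));
+ * ArchiveEntry entry;
+ * while ((entry = in.getNextEntry()) != null) {
+ *     // read this entry's data from "in"
+ * }
+ * in.close();
+ *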
+ * @NotThreadSafe
+ * @since 1.6
+ */
+public class ArjArchiveInputStream extends ArchiveInputStream {
+ private static final int ARJ_MAGIC_1 = 0x60;
+ private static final int ARJ_MAGIC_2 = 0xEA;
+ private final DataInputStream in;
+ private final String charsetName;
+ private final MainHeader mainHeader;
+ private LocalFileHeader currentLocalFileHeader = null;
+ private InputStream currentInputStream = null;
+
+ /**
+ * Constructs the ArjInputStream, taking ownership of the inputStream that is passed in.
+ * @param inputStream the underlying stream, whose ownership is taken
+ * @param charsetName the charset used for file names and comments
+ * in the archive
+ * @throws ArchiveException
+ */
+ public ArjArchiveInputStream(final InputStream inputStream,
+ final String charsetName) throws ArchiveException {
+ in = new DataInputStream(inputStream);
+ this.charsetName = charsetName;
+ try {
+ mainHeader = readMainHeader();
+ if ((mainHeader.arjFlags & MainHeader.Flags.GARBLED) != 0) {
+ throw new ArchiveException("Encrypted ARJ files are unsupported");
+ }
+ if ((mainHeader.arjFlags & MainHeader.Flags.VOLUME) != 0) {
+ throw new ArchiveException("Multi-volume ARJ files are unsupported");
+ }
+ } catch (IOException ioException) {
+ throw new ArchiveException(ioException.getMessage(), ioException);
+ }
+ }
+
+ /**
+ * Constructs the ArjInputStream, taking ownership of the inputStream that is passed in,
+ * and using the CP437 character encoding.
+ * @param inputStream the underlying stream, whose ownership is taken
+ * @throws ArchiveException
+ */
+ public ArjArchiveInputStream(final InputStream inputStream)
+ throws ArchiveException {
+ this(inputStream, "CP437");
+ }
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+
+ private int read8(final DataInputStream dataIn) throws IOException {
+ int value = dataIn.readUnsignedByte();
+ count(1);
+ return value;
+ }
+
+ private int read16(final DataInputStream dataIn) throws IOException {
+ final int value = dataIn.readUnsignedShort();
+ count(2);
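+ // readUnsignedShort reads big-endian; reversing the bytes of the
+ // 32-bit value and shifting right by 16 yields the little-endian
+ // 16-bit value that ARJ headers store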
+ return Integer.reverseBytes(value) >>> 16;
+ }
+
+ private int read32(final DataInputStream dataIn) throws IOException {
+ final int value = dataIn.readInt();
+ count(4);
+ return Integer.reverseBytes(value);
+ }
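+
+ // Editor's note (not part of the original source): ARJ stores multi-byte
+ // values little-endian, while DataInputStream reads big-endian. A worked
+ // example of the conversion above: for the bytes 0x34 0x12,
+ // readUnsignedShort() yields 0x3412; Integer.reverseBytes(0x3412) is
+ // 0x12340000, and >>> 16 leaves 0x1234, the intended value 4660.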
+
+ private String readString(final DataInputStream dataIn) throws IOException {
+ final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ int nextByte;
+ while ((nextByte = dataIn.readUnsignedByte()) != 0) {
+ buffer.write(nextByte);
+ }
+ if (charsetName != null) {
+ return new String(buffer.toByteArray(), charsetName);
+ } else {
+ // intentionally using the default encoding as that's the contract for a null charsetName
+ return new String(buffer.toByteArray());
+ }
+ }
+
+ private void readFully(final DataInputStream dataIn, byte[] b)
+ throws IOException {
+ dataIn.readFully(b);
+ count(b.length);
+ }
+
+ private byte[] readHeader() throws IOException {
+ boolean found = false;
+ byte[] basicHeaderBytes = null;
+ do {
+ int first = 0;
+ int second = read8(in);
+ do {
+ first = second;
+ second = read8(in);
+ } while (first != ARJ_MAGIC_1 && second != ARJ_MAGIC_2);
+ final int basicHeaderSize = read16(in);
+ if (basicHeaderSize == 0) {
+ // end of archive
+ return null;
+ }
+ if (basicHeaderSize <= 2600) {
+ basicHeaderBytes = new byte[basicHeaderSize];
+ readFully(in, basicHeaderBytes);
+ final long basicHeaderCrc32 = read32(in) & 0xFFFFFFFFL;
+ final CRC32 crc32 = new CRC32();
+ crc32.update(basicHeaderBytes);
+ if (basicHeaderCrc32 == crc32.getValue()) {
+ found = true;
+ }
+ }
+ } while (!found);
+ return basicHeaderBytes;
+ }
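+
+ // Editor's note: the loop above resynchronizes on the 0x60 0xEA magic pair
+ // and only accepts a candidate header once its trailing CRC32 matches.
+ // Verification sketch (names are illustrative):
+ //
+ // CRC32 crc32 = new CRC32();
+ // crc32.update(candidateBytes);
+ // boolean valid = storedCrc32 == crc32.getValue();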
+
+ private MainHeader readMainHeader() throws IOException {
+ final byte[] basicHeaderBytes = readHeader();
+ if (basicHeaderBytes == null) {
+ throw new IOException("Archive ends without any headers");
+ }
+ final DataInputStream basicHeader = new DataInputStream(
+ new ByteArrayInputStream(basicHeaderBytes));
+
+ final int firstHeaderSize = basicHeader.readUnsignedByte();
+ final byte[] firstHeaderBytes = new byte[firstHeaderSize - 1];
+ basicHeader.readFully(firstHeaderBytes);
+ final DataInputStream firstHeader = new DataInputStream(
+ new ByteArrayInputStream(firstHeaderBytes));
+
+ final MainHeader hdr = new MainHeader();
+ hdr.archiverVersionNumber = firstHeader.readUnsignedByte();
+ hdr.minVersionToExtract = firstHeader.readUnsignedByte();
+ hdr.hostOS = firstHeader.readUnsignedByte();
+ hdr.arjFlags = firstHeader.readUnsignedByte();
+ hdr.securityVersion = firstHeader.readUnsignedByte();
+ hdr.fileType = firstHeader.readUnsignedByte();
+ hdr.reserved = firstHeader.readUnsignedByte();
+ hdr.dateTimeCreated = read32(firstHeader);
+ hdr.dateTimeModified = read32(firstHeader);
+ hdr.archiveSize = 0xffffFFFFL & read32(firstHeader);
+ hdr.securityEnvelopeFilePosition = read32(firstHeader);
+ hdr.fileSpecPosition = read16(firstHeader);
+ hdr.securityEnvelopeLength = read16(firstHeader);
+ pushedBackBytes(20); // count has already counted them via readFully
+ hdr.encryptionVersion = firstHeader.readUnsignedByte();
+ hdr.lastChapter = firstHeader.readUnsignedByte();
+
+ if (firstHeaderSize >= 33) {
+ hdr.arjProtectionFactor = firstHeader.readUnsignedByte();
+ hdr.arjFlags2 = firstHeader.readUnsignedByte();
+ firstHeader.readUnsignedByte(); // skip two unused bytes
+ firstHeader.readUnsignedByte();
+ }
+
+ hdr.name = readString(basicHeader);
+ hdr.comment = readString(basicHeader);
+
+ final int extendedHeaderSize = read16(in);
+ if (extendedHeaderSize > 0) {
+ hdr.extendedHeaderBytes = new byte[extendedHeaderSize];
+ readFully(in, hdr.extendedHeaderBytes);
+ final long extendedHeaderCrc32 = 0xffffFFFFL & read32(in);
+ final CRC32 crc32 = new CRC32();
+ crc32.update(hdr.extendedHeaderBytes);
+ if (extendedHeaderCrc32 != crc32.getValue()) {
+ throw new IOException("Extended header CRC32 verification failure");
+ }
+ }
+
+ return hdr;
+ }
+
+ private LocalFileHeader readLocalFileHeader() throws IOException {
+ final byte[] basicHeaderBytes = readHeader();
+ if (basicHeaderBytes == null) {
+ return null;
+ }
+ final DataInputStream basicHeader = new DataInputStream(
+ new ByteArrayInputStream(basicHeaderBytes));
+
+ final int firstHeaderSize = basicHeader.readUnsignedByte();
+ final byte[] firstHeaderBytes = new byte[firstHeaderSize - 1];
+ basicHeader.readFully(firstHeaderBytes);
+ final DataInputStream firstHeader = new DataInputStream(
+ new ByteArrayInputStream(firstHeaderBytes));
+
+ final LocalFileHeader localFileHeader = new LocalFileHeader();
+ localFileHeader.archiverVersionNumber = firstHeader.readUnsignedByte();
+ localFileHeader.minVersionToExtract = firstHeader.readUnsignedByte();
+ localFileHeader.hostOS = firstHeader.readUnsignedByte();
+ localFileHeader.arjFlags = firstHeader.readUnsignedByte();
+ localFileHeader.method = firstHeader.readUnsignedByte();
+ localFileHeader.fileType = firstHeader.readUnsignedByte();
+ localFileHeader.reserved = firstHeader.readUnsignedByte();
+ localFileHeader.dateTimeModified = read32(firstHeader);
+ localFileHeader.compressedSize = 0xffffFFFFL & read32(firstHeader);
+ localFileHeader.originalSize = 0xffffFFFFL & read32(firstHeader);
+ localFileHeader.originalCrc32 = 0xffffFFFFL & read32(firstHeader);
+ localFileHeader.fileSpecPosition = read16(firstHeader);
+ localFileHeader.fileAccessMode = read16(firstHeader);
+ pushedBackBytes(20);
+ localFileHeader.firstChapter = firstHeader.readUnsignedByte();
+ localFileHeader.lastChapter = firstHeader.readUnsignedByte();
+
+ readExtraData(firstHeaderSize, firstHeader, localFileHeader);
+
+ localFileHeader.name = readString(basicHeader);
+ localFileHeader.comment = readString(basicHeader);
+
+ final ArrayList<byte[]> extendedHeaders = new ArrayList<byte[]>();
+ int extendedHeaderSize;
+ while ((extendedHeaderSize = read16(in)) > 0) {
+ final byte[] extendedHeaderBytes = new byte[extendedHeaderSize];
+ readFully(in, extendedHeaderBytes);
+ final long extendedHeaderCrc32 = 0xffffFFFFL & read32(in);
+ final CRC32 crc32 = new CRC32();
+ crc32.update(extendedHeaderBytes);
+ if (extendedHeaderCrc32 != crc32.getValue()) {
+ throw new IOException("Extended header CRC32 verification failure");
+ }
+ extendedHeaders.add(extendedHeaderBytes);
+ }
+ localFileHeader.extendedHeaders = extendedHeaders.toArray(new byte[extendedHeaders.size()][]);
+
+ return localFileHeader;
+ }
+
+ private void readExtraData(int firstHeaderSize, DataInputStream firstHeader,
+ LocalFileHeader localFileHeader) throws IOException {
+ if (firstHeaderSize >= 33) {
+ localFileHeader.extendedFilePosition = read32(firstHeader);
+ if (firstHeaderSize >= 45) {
+ localFileHeader.dateTimeAccessed = read32(firstHeader);
+ localFileHeader.dateTimeCreated = read32(firstHeader);
+ localFileHeader.originalSizeEvenForVolumes = read32(firstHeader);
+ pushedBackBytes(12);
+ }
+ pushedBackBytes(4);
+ }
+ }
+
+ /**
+ * Checks if the signature matches what is expected for an arj file.
+ *
+ * @param signature
+ * the bytes to check
+ * @param length
+ * the number of bytes to check
+ * @return true, if this stream is an arj archive stream, false otherwise
+ */
+ public static boolean matches(final byte[] signature, final int length) {
+ return length >= 2 &&
+ (0xff & signature[0]) == ARJ_MAGIC_1 &&
+ (0xff & signature[1]) == ARJ_MAGIC_2;
+ }
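+
+ // Detection sketch (editor's addition; "bufferedStream" stands for a
+ // hypothetical caller-side stream whose first bytes have been buffered):
+ //
+ // byte[] peeked = new byte[2];
+ // int n = bufferedStream.read(peeked);
+ // if (ArjArchiveInputStream.matches(peeked, n)) { /* treat as ARJ */ }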
+
+ /**
+ * Gets the archive's recorded name.
+ */
+ public String getArchiveName() {
+ return mainHeader.name;
+ }
+
+ /**
+ * Gets the archive's comment.
+ */
+ public String getArchiveComment() {
+ return mainHeader.comment;
+ }
+
+ @Override
+ public ArjArchiveEntry getNextEntry() throws IOException {
+ if (currentInputStream != null) {
+ // return value ignored as IOUtils.skip ensures the stream is drained completely
+ IOUtils.skip(currentInputStream, Long.MAX_VALUE);
+ currentInputStream.close();
+ currentLocalFileHeader = null;
+ currentInputStream = null;
+ }
+
+ currentLocalFileHeader = readLocalFileHeader();
+ if (currentLocalFileHeader != null) {
+ currentInputStream = new BoundedInputStream(in, currentLocalFileHeader.compressedSize);
+ if (currentLocalFileHeader.method == LocalFileHeader.Methods.STORED) {
+ currentInputStream = new CRC32VerifyingInputStream(currentInputStream,
+ currentLocalFileHeader.originalSize, currentLocalFileHeader.originalCrc32);
+ }
+ return new ArjArchiveEntry(currentLocalFileHeader);
+ } else {
+ currentInputStream = null;
+ return null;
+ }
+ }
+
+ @Override
+ public boolean canReadEntryData(ArchiveEntry ae) {
+ return currentLocalFileHeader.method == LocalFileHeader.Methods.STORED;
+ }
+
+ @Override
+ public int read(final byte[] b, final int off, final int len) throws IOException {
+ if (currentLocalFileHeader.method != LocalFileHeader.Methods.STORED) {
+ throw new IOException("Unsupported compression method " + currentLocalFileHeader.method);
+ }
+ return currentInputStream.read(b, off, len);
+ }
+}
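+
+// A minimal usage sketch (editor's addition, not part of the commons-compress
+// source; "sample.arj" is a hypothetical archive containing only stored entries):
+//
+// InputStream is = new FileInputStream("sample.arj");
+// ArjArchiveInputStream arj = new ArjArchiveInputStream(is); // defaults to CP437
+// ArjArchiveEntry entry;
+// while ((entry = arj.getNextEntry()) != null) {
+//     System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
+// }
+// arj.close();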
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/LocalFileHeader.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/LocalFileHeader.java
new file mode 100644
index 000000000..d48dc1748
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/LocalFileHeader.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.arj;
+
+import java.util.Arrays;
+
+class LocalFileHeader {
+ int archiverVersionNumber;
+ int minVersionToExtract;
+ int hostOS;
+ int arjFlags;
+ int method;
+ int fileType;
+ int reserved;
+ int dateTimeModified;
+ long compressedSize;
+ long originalSize;
+ long originalCrc32;
+ int fileSpecPosition;
+ int fileAccessMode;
+ int firstChapter;
+ int lastChapter;
+
+ int extendedFilePosition;
+ int dateTimeAccessed;
+ int dateTimeCreated;
+ int originalSizeEvenForVolumes;
+
+ String name;
+ String comment;
+
+ byte[][] extendedHeaders = null;
+
+ static class Flags {
+ static final int GARBLED = 0x01;
+ static final int VOLUME = 0x04;
+ static final int EXTFILE = 0x08;
+ static final int PATHSYM = 0x10;
+ static final int BACKUP = 0x20;
+ }
+
+ static class FileTypes {
+ static final int BINARY = 0;
+ static final int SEVEN_BIT_TEXT = 1;
+ static final int DIRECTORY = 3;
+ static final int VOLUME_LABEL = 4;
+ static final int CHAPTER_LABEL = 5;
+ }
+
+ static class Methods {
+ static final int STORED = 0;
+ static final int COMPRESSED_MOST = 1;
+ static final int COMPRESSED_FASTEST = 4;
+ static final int NO_DATA_NO_CRC = 8;
+ static final int NO_DATA = 9;
+ }
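+
+ // Editor's note: of these method values only STORED can actually be
+ // extracted by the ArjArchiveInputStream in this codebase; its read()
+ // rejects every other compression method.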
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("LocalFileHeader [archiverVersionNumber=");
+ builder.append(archiverVersionNumber);
+ builder.append(", minVersionToExtract=");
+ builder.append(minVersionToExtract);
+ builder.append(", hostOS=");
+ builder.append(hostOS);
+ builder.append(", arjFlags=");
+ builder.append(arjFlags);
+ builder.append(", method=");
+ builder.append(method);
+ builder.append(", fileType=");
+ builder.append(fileType);
+ builder.append(", reserved=");
+ builder.append(reserved);
+ builder.append(", dateTimeModified=");
+ builder.append(dateTimeModified);
+ builder.append(", compressedSize=");
+ builder.append(compressedSize);
+ builder.append(", originalSize=");
+ builder.append(originalSize);
+ builder.append(", originalCrc32=");
+ builder.append(originalCrc32);
+ builder.append(", fileSpecPosition=");
+ builder.append(fileSpecPosition);
+ builder.append(", fileAccessMode=");
+ builder.append(fileAccessMode);
+ builder.append(", firstChapter=");
+ builder.append(firstChapter);
+ builder.append(", lastChapter=");
+ builder.append(lastChapter);
+ builder.append(", extendedFilePosition=");
+ builder.append(extendedFilePosition);
+ builder.append(", dateTimeAccessed=");
+ builder.append(dateTimeAccessed);
+ builder.append(", dateTimeCreated=");
+ builder.append(dateTimeCreated);
+ builder.append(", originalSizeEvenForVolumes=");
+ builder.append(originalSizeEvenForVolumes);
+ builder.append(", name=");
+ builder.append(name);
+ builder.append(", comment=");
+ builder.append(comment);
+ builder.append(", extendedHeaders=");
+ builder.append(Arrays.toString(extendedHeaders));
+ builder.append("]");
+ return builder.toString();
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/MainHeader.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/MainHeader.java
new file mode 100644
index 000000000..a41aa72c2
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/MainHeader.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.arj;
+
+import java.util.Arrays;
+
+class MainHeader {
+ int archiverVersionNumber;
+ int minVersionToExtract;
+ int hostOS;
+ int arjFlags;
+ int securityVersion;
+ int fileType;
+ int reserved;
+ int dateTimeCreated;
+ int dateTimeModified;
+ long archiveSize;
+ int securityEnvelopeFilePosition;
+ int fileSpecPosition;
+ int securityEnvelopeLength;
+ int encryptionVersion;
+ int lastChapter;
+ int arjProtectionFactor;
+ int arjFlags2;
+ String name;
+ String comment;
+ byte[] extendedHeaderBytes = null;
+
+ static class Flags {
+ static final int GARBLED = 0x01;
+ static final int OLD_SECURED_NEW_ANSI_PAGE = 0x02;
+ static final int VOLUME = 0x04;
+ static final int ARJPROT = 0x08;
+ static final int PATHSYM = 0x10;
+ static final int BACKUP = 0x20;
+ static final int SECURED = 0x40;
+ static final int ALTNAME = 0x80;
+ }
+
+
+ @Override
+ public String toString() {
+ final StringBuilder builder = new StringBuilder();
+ builder.append("MainHeader [archiverVersionNumber=");
+ builder.append(archiverVersionNumber);
+ builder.append(", minVersionToExtract=");
+ builder.append(minVersionToExtract);
+ builder.append(", hostOS=");
+ builder.append(hostOS);
+ builder.append(", arjFlags=");
+ builder.append(arjFlags);
+ builder.append(", securityVersion=");
+ builder.append(securityVersion);
+ builder.append(", fileType=");
+ builder.append(fileType);
+ builder.append(", reserved=");
+ builder.append(reserved);
+ builder.append(", dateTimeCreated=");
+ builder.append(dateTimeCreated);
+ builder.append(", dateTimeModified=");
+ builder.append(dateTimeModified);
+ builder.append(", archiveSize=");
+ builder.append(archiveSize);
+ builder.append(", securityEnvelopeFilePosition=");
+ builder.append(securityEnvelopeFilePosition);
+ builder.append(", fileSpecPosition=");
+ builder.append(fileSpecPosition);
+ builder.append(", securityEnvelopeLength=");
+ builder.append(securityEnvelopeLength);
+ builder.append(", encryptionVersion=");
+ builder.append(encryptionVersion);
+ builder.append(", lastChapter=");
+ builder.append(lastChapter);
+ builder.append(", arjProtectionFactor=");
+ builder.append(arjProtectionFactor);
+ builder.append(", arjFlags2=");
+ builder.append(arjFlags2);
+ builder.append(", name=");
+ builder.append(name);
+ builder.append(", comment=");
+ builder.append(comment);
+ builder.append(", extendedHeaderBytes=");
+ builder.append(Arrays.toString(extendedHeaderBytes));
+ builder.append("]");
+ return builder.toString();
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/package.html
new file mode 100644
index 000000000..de18f61d8
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/arj/package.html
@@ -0,0 +1,24 @@
+<html>
+  <body>
+    <p>Provides stream classes for reading archives using
+    the ARJ format.</p>
+  </body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveEntry.java
new file mode 100644
index 000000000..641fae447
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveEntry.java
@@ -0,0 +1,892 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.cpio;
+
+import java.io.File;
+import java.util.Date;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+/**
+ * A cpio archive consists of a sequence of files. There are several types of
+ * headers defined in two categories of new and old format. The headers are
+ * recognized by magic numbers:
+ *
+ *
+ * - "070701" ASCII for new portable format
+ * - "070702" ASCII for new portable format with CRC
+ * - "070707" ASCII for old ascii (also known as Portable ASCII, odc or old
+ * character format)
+ * - 070707 binary for old binary
+ *
+ *
+ * The old binary format is limited to 16 bits for user id, group
+ * id, device, and inode numbers. It is limited to 4 gigabyte file
+ * sizes.
+ *
+ * The old ASCII format is limited to 18 bits for the user id, group
+ * id, device, and inode numbers. It is limited to 8 gigabyte file
+ * sizes.
+ *
+ * The new ASCII format is limited to 4 gigabyte file sizes.
+ *
+ * CPIO 2.5 also knows about tar, but it is not recognized here.
+ *
+ *
+ * OLD FORMAT
+ *
+ * Each file has a 76 (ascii) / 26 (binary) byte header, a variable
+ * length, NUL terminated filename, and variable length file data. A
+ * header for a filename "TRAILER!!!" indicates the end of the
+ * archive.
+ *
+ * All the fields in the header are ISO 646 (approximately ASCII)
+ * strings of octal numbers, left padded, not NUL terminated.
+ *
+ *
+ * FIELDNAME NOTES
+ * c_magic The integer value octal 070707. This value can be used to deter-
+ * mine whether this archive is written with little-endian or big-
+ * endian integers.
+ * c_dev Device that contains a directory entry for this file
+ * c_ino I-node number that identifies the input file to the file system
+ * c_mode The mode specifies both the regular permissions and the file type.
+ * c_uid Numeric User ID of the owner of the input file
+ * c_gid Numeric Group ID of the owner of the input file
+ * c_nlink Number of links that are connected to the input file
+ * c_rdev For block special and character special entries, this field
+ * contains the associated device number. For all other entry types,
+ * it should be set to zero by writers and ignored by readers.
+ * c_mtime[2] Modification time of the file, indicated as the number of seconds
+ * since the start of the epoch, 00:00:00 UTC January 1, 1970. The
+ * four-byte integer is stored with the most-significant 16 bits
+ * first followed by the least-significant 16 bits. Each of the two
+ * 16 bit values are stored in machine-native byte order.
+ * c_namesize Length of the path name, including the terminating null byte
+ * c_filesize[2] Length of the file in bytes. This is the length of the data
+ * section that follows the header structure. Must be 0 for
+ * FIFOs and directories
+ *
+ * All fields are unsigned short fields with 16-bit integer values
+ * apart from c_mtime and c_filesize which are 32-bit integer values
+ *
+ *
+ * If necessary, the filename and file data are padded with a NUL byte to an even length
+ *
+ * Special files, directories, and the trailer are recorded with
+ * the h_filesize field equal to 0.
+ *
+ * In the ASCII version of this format, the 16-bit entries are represented as 6-byte octal numbers,
+ * and the 32-bit entries are represented as 11-byte octal numbers. No padding is added.
+ *
+ * NEW FORMAT
+ *
+ * Each file has a 110 byte header, a variable length, NUL
+ * terminated filename, and variable length file data. A header for a
+ * filename "TRAILER!!!" indicates the end of the archive. All the
+ * fields in the header are ISO 646 (approximately ASCII) strings of
+ * hexadecimal numbers, left padded, not NUL terminated.
+ *
+ *
+ * FIELDNAME NOTES
+ * c_magic[6] The string 070701 for new ASCII, the string 070702 for new ASCII with CRC
+ * c_ino[8]
+ * c_mode[8]
+ * c_uid[8]
+ * c_gid[8]
+ * c_nlink[8]
+ * c_mtim[8]
+ * c_filesize[8] must be 0 for FIFOs and directories
+ * c_maj[8]
+ * c_min[8]
+ * c_rmaj[8] only valid for chr and blk special files
+ * c_rmin[8] only valid for chr and blk special files
+ * c_namesize[8] count includes terminating NUL in pathname
+ * c_check[8] 0 for "new" portable format; for CRC format
+ * the sum of all the bytes in the file
+ *
+ *
+ * New ASCII Format The "new" ASCII format uses 8-byte hexadecimal
+ * fields for all numbers and separates device numbers into separate
+ * fields for major and minor numbers.
+ *
+ * The pathname is followed by NUL bytes so that the total size of
+ * the fixed header plus pathname is a multiple of four. Likewise, the
+ * file data is padded to a multiple of four bytes.
+ *
+ * This class uses mutable fields and is not considered to be
+ * threadsafe.
+ *
+ * Based on code from the jRPM project (http://jrpm.sourceforge.net).
+ *
+ * The MAGIC numbers and other constants are defined in {@link CpioConstants}
+ *
+ *
+ * N.B. does not handle the cpio "tar" format
+ *
+ * @NotThreadSafe
+ * @see http://people.freebsd.org/~kientzle/libarchive/man/cpio.5.txt
+ */
+public class CpioArchiveEntry implements CpioConstants, ArchiveEntry {
+
+ // Header description fields - should be same throughout an archive
+
+ /**
+ * See constructor documentation for possible values.
+ */
+ private final short fileFormat;
+
+ /** The number of bytes in each header record; depends on the file format */
+ private final int headerSize;
+
+ /** The boundary to which the header and data elements are aligned: 0, 2 or 4 bytes */
+ private final int alignmentBoundary;
+
+ // Header fields
+
+ private long chksum = 0;
+
+ /** Number of bytes in the file */
+ private long filesize = 0;
+
+ private long gid = 0;
+
+ private long inode = 0;
+
+ private long maj = 0;
+
+ private long min = 0;
+
+ private long mode = 0;
+
+ private long mtime = 0;
+
+ private String name;
+
+ private long nlink = 0;
+
+ private long rmaj = 0;
+
+ private long rmin = 0;
+
+ private long uid = 0;
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified format.
+ *
+ * @param format
+ * The cpio format for this entry.
+ *
+ * Possible format values are:
+ *
+ * CpioConstants.FORMAT_NEW
+ * CpioConstants.FORMAT_NEW_CRC
+ * CpioConstants.FORMAT_OLD_BINARY
+ * CpioConstants.FORMAT_OLD_ASCII
+ *
+ */
+ public CpioArchiveEntry(final short format) {
+ switch (format) {
+ case FORMAT_NEW:
+ this.headerSize = 110;
+ this.alignmentBoundary = 4;
+ break;
+ case FORMAT_NEW_CRC:
+ this.headerSize = 110;
+ this.alignmentBoundary = 4;
+ break;
+ case FORMAT_OLD_ASCII:
+ this.headerSize = 76;
+ this.alignmentBoundary = 0;
+ break;
+ case FORMAT_OLD_BINARY:
+ this.headerSize = 26;
+ this.alignmentBoundary = 2;
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown header type");
+ }
+ this.fileFormat = format;
+ }
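+
+ // Construction sketch (editor's addition): the header size and alignment
+ // boundary follow directly from the chosen format, e.g.
+ //
+ // CpioArchiveEntry e = new CpioArchiveEntry(CpioConstants.FORMAT_NEW);
+ // e.getHeaderSize(); // 110
+ // e.getAlignmentBoundary(); // 4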
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified name. The format of
+ * this entry will be the new format.
+ *
+ * @param name
+ * The name of this entry.
+ */
+ public CpioArchiveEntry(final String name) {
+ this(FORMAT_NEW, name);
+ }
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified name.
+ *
+ * @param format
+ * The cpio format for this entry.
+ * @param name
+ * The name of this entry.
+ *
+ * Possible format values are:
+ *
+ * CpioConstants.FORMAT_NEW
+ * CpioConstants.FORMAT_NEW_CRC
+ * CpioConstants.FORMAT_OLD_BINARY
+ * CpioConstants.FORMAT_OLD_ASCII
+ *
+ *
+ * @since 1.1
+ */
+ public CpioArchiveEntry(final short format, final String name) {
+ this(format);
+ this.name = name;
+ }
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified name. The format of
+ * this entry will be the new format.
+ *
+ * @param name
+ * The name of this entry.
+ * @param size
+ * The size of this entry
+ */
+ public CpioArchiveEntry(final String name, final long size) {
+ this(name);
+ this.setSize(size);
+ }
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified name.
+ *
+ * @param format
+ * The cpio format for this entry.
+ * @param name
+ * The name of this entry.
+ * @param size
+ * The size of this entry
+ *
+ * Possible format values are:
+ *
+ * CpioConstants.FORMAT_NEW
+ * CpioConstants.FORMAT_NEW_CRC
+ * CpioConstants.FORMAT_OLD_BINARY
+ * CpioConstants.FORMAT_OLD_ASCII
+ *
+ *
+ * @since 1.1
+ */
+ public CpioArchiveEntry(final short format, final String name,
+ final long size) {
+ this(format, name);
+ this.setSize(size);
+ }
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified name for a
+ * specified file. The format of this entry will be the new
+ * format.
+ *
+ * @param inputFile
+ * The file to gather information from.
+ * @param entryName
+ * The name of this entry.
+ */
+ public CpioArchiveEntry(File inputFile, String entryName) {
+ this(FORMAT_NEW, inputFile, entryName);
+ }
+
+ /**
+ * Creates a CPIOArchiveEntry with a specified name for a
+ * specified file.
+ *
+ * @param format
+ * The cpio format for this entry.
+ * @param inputFile
+ * The file to gather information from.
+ * @param entryName
+ * The name of this entry.
+ *
+ * Possible format values are:
+ *
+ * CpioConstants.FORMAT_NEW
+ * CpioConstants.FORMAT_NEW_CRC
+ * CpioConstants.FORMAT_OLD_BINARY
+ * CpioConstants.FORMAT_OLD_ASCII
+ *
+ *
+ * @since 1.1
+ */
+ public CpioArchiveEntry(final short format, File inputFile,
+ String entryName) {
+ this(format, entryName, inputFile.isFile() ? inputFile.length() : 0);
+ long mode=0;
+ if (inputFile.isDirectory()){
+ mode |= C_ISDIR;
+ } else if (inputFile.isFile()){
+ mode |= C_ISREG;
+ } else {
+ throw new IllegalArgumentException("Cannot determine type of file "
+ + inputFile.getName());
+ }
+ // TODO set other fields as needed
+ setMode(mode);
+ setTime(inputFile.lastModified() / 1000);
+ }
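+
+ // Usage sketch (editor's addition; "data.bin" is hypothetical): the size,
+ // the C_ISREG/C_ISDIR type bit and the mtime in seconds are all taken from
+ // the file itself.
+ //
+ // CpioArchiveEntry e = new CpioArchiveEntry(new File("data.bin"), "data.bin");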
+
+ /**
+ * Check if the method is allowed for the defined format.
+ */
+ private void checkNewFormat() {
+ if ((this.fileFormat & FORMAT_NEW_MASK) == 0) {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ /**
+ * Check if the method is allowed for the defined format.
+ */
+ private void checkOldFormat() {
+ if ((this.fileFormat & FORMAT_OLD_MASK) == 0) {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ /**
+ * Get the checksum.
+ * Only supported for the new formats.
+ *
+ * @return Returns the checksum.
+ * @throws UnsupportedOperationException if the format is not a new format
+ */
+ public long getChksum() {
+ checkNewFormat();
+ return this.chksum;
+ }
+
+ /**
+ * Get the device id.
+ *
+ * @return Returns the device id.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with a new
+ * format.
+ */
+ public long getDevice() {
+ checkOldFormat();
+ return this.min;
+ }
+
+ /**
+ * Get the major device id.
+ *
+ * @return Returns the major device id.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with an old
+ * format.
+ */
+ public long getDeviceMaj() {
+ checkNewFormat();
+ return this.maj;
+ }
+
+ /**
+ * Get the minor device id
+ *
+ * @return Returns the minor device id.
+ * @throws UnsupportedOperationException if format is not a new format
+ */
+ public long getDeviceMin() {
+ checkNewFormat();
+ return this.min;
+ }
+
+ /**
+ * Get the filesize.
+ *
+ * @return Returns the filesize.
+ * @see org.apache.commons.compress.archivers.ArchiveEntry#getSize()
+ */
+ public long getSize() {
+ return this.filesize;
+ }
+
+ /**
+ * Get the format for this entry.
+ *
+ * @return Returns the format.
+ */
+ public short getFormat() {
+ return this.fileFormat;
+ }
+
+ /**
+ * Get the group id.
+ *
+ * @return Returns the group id.
+ */
+ public long getGID() {
+ return this.gid;
+ }
+
+ /**
+ * Get the header size for this CPIO format
+ *
+ * @return Returns the header size in bytes.
+ */
+ public int getHeaderSize() {
+ return this.headerSize;
+ }
+
+ /**
+ * Get the alignment boundary for this CPIO format
+ *
+ * @return Returns the alignment boundary (0, 2, 4) in bytes
+ */
+ public int getAlignmentBoundary() {
+ return this.alignmentBoundary;
+ }
+
+ /**
+ * Get the number of bytes needed to pad the header to the alignment boundary.
+ *
+ * @return the number of bytes needed to pad the header (0,1,2,3)
+ */
+ public int getHeaderPadCount(){
+ if (this.alignmentBoundary == 0) { return 0; }
+ int size = this.headerSize+this.name.length()+1; // Name has terminating null
+ int remain = size % this.alignmentBoundary;
+ if (remain > 0){
+ return this.alignmentBoundary - remain;
+ }
+ return 0;
+ }
+
+ /**
+ * Get the number of bytes needed to pad the data to the alignment boundary.
+ *
+ * @return the number of bytes needed to pad the data (0,1,2,3)
+ */
+ public int getDataPadCount(){
+ if (this.alignmentBoundary == 0) { return 0; }
+ long size = this.filesize;
+ int remain = (int) (size % this.alignmentBoundary);
+ if (remain > 0){
+ return this.alignmentBoundary - remain;
+ }
+ return 0;
+ }
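+
+ // Worked example (editor's addition) for the new format (boundary 4): a
+ // 110-byte header plus the name "ab" and its NUL is 113 bytes, and
+ // 113 % 4 == 1, so getHeaderPadCount() returns 3; a 5-byte file body
+ // likewise needs 3 bytes of data padding.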
+
+ /**
+ * Get the inode.
+ *
+ * @return Returns the inode.
+ */
+ public long getInode() {
+ return this.inode;
+ }
+
+ /**
+ * Get the mode of this entry (e.g. directory, regular file).
+ *
+ * @return Returns the mode.
+ */
+ public long getMode() {
+ return mode == 0 && !CPIO_TRAILER.equals(name) ? C_ISREG : mode;
+ }
+
+ /**
+ * Get the name.
+ *
+ * @return Returns the name.
+ */
+ public String getName() {
+ return this.name;
+ }
+
+ /**
+ * Get the number of links.
+ *
+ * @return Returns the number of links.
+ */
+ public long getNumberOfLinks() {
+ return nlink == 0 ?
+ isDirectory() ? 2 : 1
+ : nlink;
+ }
+
+ /**
+ * Get the remote device id.
+ *
+ * @return Returns the remote device id.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with a new
+ * format.
+ */
+ public long getRemoteDevice() {
+ checkOldFormat();
+ return this.rmin;
+ }
+
+ /**
+ * Get the remote major device id.
+ *
+ * @return Returns the remote major device id.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with an old
+ * format.
+ */
+ public long getRemoteDeviceMaj() {
+ checkNewFormat();
+ return this.rmaj;
+ }
+
+ /**
+ * Get the remote minor device id.
+ *
+ * @return Returns the remote minor device id.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with an old
+ * format.
+ */
+ public long getRemoteDeviceMin() {
+ checkNewFormat();
+ return this.rmin;
+ }
+
+ /**
+ * Get the time in seconds.
+ *
+ * @return Returns the time.
+ */
+ public long getTime() {
+ return this.mtime;
+ }
+
+ public Date getLastModifiedDate() {
+ return new Date(1000 * getTime());
+ }
+
+ /**
+ * Get the user id.
+ *
+ * @return Returns the user id.
+ */
+ public long getUID() {
+ return this.uid;
+ }
+
+ /**
+ * Check if this entry represents a block device.
+ *
+ * @return TRUE if this entry is a block device.
+ */
+ public boolean isBlockDevice() {
+ return CpioUtil.fileType(mode) == C_ISBLK;
+ }
+
+ /**
+ * Check if this entry represents a character device.
+ *
+ * @return TRUE if this entry is a character device.
+ */
+ public boolean isCharacterDevice() {
+ return CpioUtil.fileType(mode) == C_ISCHR;
+ }
+
+ /**
+ * Check if this entry represents a directory.
+ *
+ * @return TRUE if this entry is a directory.
+ */
+ public boolean isDirectory() {
+ return CpioUtil.fileType(mode) == C_ISDIR;
+ }
+
+ /**
+ * Check if this entry represents a network device.
+ *
+ * @return TRUE if this entry is a network device.
+ */
+ public boolean isNetwork() {
+ return CpioUtil.fileType(mode) == C_ISNWK;
+ }
+
+ /**
+ * Check if this entry represents a pipe.
+ *
+ * @return TRUE if this entry is a pipe.
+ */
+ public boolean isPipe() {
+ return CpioUtil.fileType(mode) == C_ISFIFO;
+ }
+
+ /**
+ * Check if this entry represents a regular file.
+ *
+ * @return TRUE if this entry is a regular file.
+ */
+ public boolean isRegularFile() {
+ return CpioUtil.fileType(mode) == C_ISREG;
+ }
+
+ /**
+ * Check if this entry represents a socket.
+ *
+ * @return TRUE if this entry is a socket.
+ */
+ public boolean isSocket() {
+ return CpioUtil.fileType(mode) == C_ISSOCK;
+ }
+
+ /**
+ * Check if this entry represents a symbolic link.
+ *
+ * @return TRUE if this entry is a symbolic link.
+ */
+ public boolean isSymbolicLink() {
+ return CpioUtil.fileType(mode) == C_ISLNK;
+ }
+
+ /**
+ * Set the checksum. The checksum is calculated by adding all bytes of a
+ * file to transfer (crc += buf[pos] & 0xFF).
+ *
+ * @param chksum
+ * The checksum to set.
+ */
+ public void setChksum(final long chksum) {
+ checkNewFormat();
+ this.chksum = chksum;
+ }
+
+ /**
+ * Set the device id.
+ *
+ * @param device
+ * The device id to set.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with a new
+ * format.
+ */
+ public void setDevice(final long device) {
+ checkOldFormat();
+ this.min = device;
+ }
+
+ /**
+ * Set major device id.
+ *
+ * @param maj
+ * The major device id to set.
+ */
+ public void setDeviceMaj(final long maj) {
+ checkNewFormat();
+ this.maj = maj;
+ }
+
+ /**
+ * Set the minor device id
+ *
+ * @param min
+ * The minor device id to set.
+ */
+ public void setDeviceMin(final long min) {
+ checkNewFormat();
+ this.min = min;
+ }
+
+ /**
+ * Set the filesize.
+ *
+ * @param size
+ * The filesize to set.
+ */
+ public void setSize(final long size) {
+ if (size < 0 || size > 0xFFFFFFFFL) {
+ throw new IllegalArgumentException("invalid entry size <" + size
+ + ">");
+ }
+ this.filesize = size;
+ }
+
+ /**
+ * Set the group id.
+ *
+ * @param gid
+ * The group id to set.
+ */
+ public void setGID(final long gid) {
+ this.gid = gid;
+ }
+
+ /**
+ * Set the inode.
+ *
+ * @param inode
+ * The inode to set.
+ */
+ public void setInode(final long inode) {
+ this.inode = inode;
+ }
+
+ /**
+ * Set the mode of this entry (e.g. directory, regular file).
+ *
+ * @param mode
+ * The mode to set.
+ */
+ public void setMode(final long mode) {
+ final long maskedMode = mode & S_IFMT;
+ switch ((int) maskedMode) {
+ case C_ISDIR:
+ case C_ISLNK:
+ case C_ISREG:
+ case C_ISFIFO:
+ case C_ISCHR:
+ case C_ISBLK:
+ case C_ISSOCK:
+ case C_ISNWK:
+ break;
+ default:
+ throw new IllegalArgumentException(
+ "Unknown mode. "
+ + "Full: " + Long.toHexString(mode)
+ + " Masked: " + Long.toHexString(maskedMode));
+ }
+
+ this.mode = mode;
+ }
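+
+ // Example (editor's addition): the file-type bits must match one of the
+ // C_IS* constants checked above; permission bits may be OR-ed in alongside.
+ //
+ // entry.setMode(CpioConstants.C_ISREG | 0644); // regular file, rw-r--r--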
+
+ /**
+ * Set the name.
+ *
+ * @param name
+ * The name to set.
+ */
+ public void setName(final String name) {
+ this.name = name;
+ }
+
+ /**
+ * Set the number of links.
+ *
+ * @param nlink
+ * The number of links to set.
+ */
+ public void setNumberOfLinks(final long nlink) {
+ this.nlink = nlink;
+ }
+
+ /**
+ * Set the remote device id.
+ *
+ * @param device
+ * The remote device id to set.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with a new
+ * format.
+ */
+ public void setRemoteDevice(final long device) {
+ checkOldFormat();
+ this.rmin = device;
+ }
+
+ /**
+ * Set the remote major device id.
+ *
+ * @param rmaj
+ * The remote major device id to set.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with an old
+ * format.
+ */
+ public void setRemoteDeviceMaj(final long rmaj) {
+ checkNewFormat();
+ this.rmaj = rmaj;
+ }
+
+ /**
+ * Set the remote minor device id.
+ *
+ * @param rmin
+ * The remote minor device id to set.
+ * @throws UnsupportedOperationException
+ * if this method is called for a CPIOArchiveEntry with an old
+ * format.
+ */
+ public void setRemoteDeviceMin(final long rmin) {
+ checkNewFormat();
+ this.rmin = rmin;
+ }
+
+ /**
+ * Set the time in seconds.
+ *
+ * @param time
+ * The time to set.
+ */
+ public void setTime(final long time) {
+ this.mtime = time;
+ }
+
+ /**
+ * Set the user id.
+ *
+ * @param uid
+ * The user id to set.
+ */
+ public void setUID(final long uid) {
+ this.uid = uid;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + (name == null ? 0 : name.hashCode());
+ return result;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ CpioArchiveEntry other = (CpioArchiveEntry) obj;
+ if (name == null) {
+ if (other.name != null) {
+ return false;
+ }
+ } else if (!name.equals(other.name)) {
+ return false;
+ }
+ return true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveInputStream.java
new file mode 100644
index 000000000..0d7e4ba1f
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveInputStream.java
@@ -0,0 +1,562 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.cpio;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
+import org.apache.commons.compress.utils.ArchiveUtils;
+import org.apache.commons.compress.utils.CharsetNames;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * CPIOArchiveInputStream is a stream for reading cpio streams. All formats of
+ * cpio are supported (old ascii, old binary, new portable format and the new
+ * portable format with crc).
+ *
+ *
+ * The stream can be read by extracting a cpio entry (containing all
+ * information about an entry) and afterwards reading from the stream the
+ * file specified by the entry.
+ *
+ *
+ * CPIOArchiveInputStream cpioIn = new CPIOArchiveInputStream(
+ * new FileInputStream(new File("test.cpio")));
+ * CPIOArchiveEntry cpioEntry;
+ *
+ * while ((cpioEntry = cpioIn.getNextEntry()) != null) {
+ * System.out.println(cpioEntry.getName());
+ * int tmp;
+ * StringBuilder buf = new StringBuilder();
+ * while ((tmp = cpioIn.read()) != -1) {
+ * buf.append((char) tmp);
+ * }
+ * System.out.println(buf.toString());
+ * }
+ * cpioIn.close();
+ *
+ *
+ * Note: This implementation should be compatible to cpio 2.5
+ *
+ * This class uses mutable fields and is not considered to be threadsafe.
+ *
+ * Based on code from the jRPM project (jrpm.sourceforge.net)
+ */
+
+public class CpioArchiveInputStream extends ArchiveInputStream implements
+ CpioConstants {
+
+ private boolean closed = false;
+
+ private CpioArchiveEntry entry;
+
+ private long entryBytesRead = 0;
+
+ private boolean entryEOF = false;
+
+ private final byte tmpbuf[] = new byte[4096];
+
+ private long crc = 0;
+
+ private final InputStream in;
+
+ // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
+ private final byte[] TWO_BYTES_BUF = new byte[2];
+ private final byte[] FOUR_BYTES_BUF = new byte[4];
+ private final byte[] SIX_BYTES_BUF = new byte[6];
+
+ private final int blockSize;
+
+ /**
+ * The encoding to use for filenames and labels.
+ */
+ private final ZipEncoding encoding;
+
+ /**
+ * Construct the cpio input stream with a blocksize of {@link
+ * CpioConstants#BLOCK_SIZE BLOCK_SIZE} and expecting ASCII file
+ * names.
+ *
+ * @param in
+ * The cpio stream
+ */
+ public CpioArchiveInputStream(final InputStream in) {
+ this(in, BLOCK_SIZE, CharsetNames.US_ASCII);
+ }
+
+ /**
+ * Construct the cpio input stream with a blocksize of {@link
+ * CpioConstants#BLOCK_SIZE BLOCK_SIZE}.
+ *
+ * @param in
+ * The cpio stream
+ * @param encoding
+ * The encoding of file names to expect - use null for
+ * the platform's default.
+ * @since 1.6
+ */
+ public CpioArchiveInputStream(final InputStream in, String encoding) {
+ this(in, BLOCK_SIZE, encoding);
+ }
+
+ /**
+ * Construct the cpio input stream with the given block size, expecting
+ * ASCII file names.
+ *
+ * @param in
+ * The cpio stream
+ * @param blockSize
+ * The block size of the archive.
+ * @since 1.5
+ */
+ public CpioArchiveInputStream(final InputStream in, int blockSize) {
+ this(in, blockSize, CharsetNames.US_ASCII);
+ }
+
+ /**
+ * Construct the cpio input stream with the given block size and file name encoding.
+ *
+ * @param in
+ * The cpio stream
+ * @param blockSize
+ * The block size of the archive.
+ * @param encoding
+ * The encoding of file names to expect - use null for
+ * the platform's default.
+ * @since 1.6
+ */
+ public CpioArchiveInputStream(final InputStream in, int blockSize, String encoding) {
+ this.in = in;
+ this.blockSize = blockSize;
+ this.encoding = ZipEncodingHelper.getZipEncoding(encoding);
+ }
+
+ /**
+ * Returns 0 after EOF has been reached for the current entry data, otherwise
+ * always returns 1.
+ *
+ * Programs should not count on this method to return the actual number of
+ * bytes that could be read without blocking.
+ *
+ * @return 1 before EOF and 0 after EOF has been reached for the current entry.
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ */
+ @Override
+ public int available() throws IOException {
+ ensureOpen();
+ if (this.entryEOF) {
+ return 0;
+ }
+ return 1;
+ }
+
+ /**
+ * Closes the CPIO input stream.
+ *
+ * @throws IOException
+ * if an I/O error has occurred
+ */
+ @Override
+ public void close() throws IOException {
+ if (!this.closed) {
+ in.close();
+ this.closed = true;
+ }
+ }
+
+ /**
+ * Closes the current CPIO entry and positions the stream for reading the
+ * next entry.
+ *
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ */
+ private void closeEntry() throws IOException {
+ // the skip implementation of this class will not skip more
+ // than Integer.MAX_VALUE bytes
+ while (skip((long) Integer.MAX_VALUE) == Integer.MAX_VALUE) { // NOPMD
+ // do nothing
+ }
+ }
+
+ /**
+ * Check to make sure that this stream has not been closed
+ *
+ * @throws IOException
+ * if the stream is already closed
+ */
+ private void ensureOpen() throws IOException {
+ if (this.closed) {
+ throw new IOException("Stream closed");
+ }
+ }
+
+ /**
+ * Reads the next CPIO file entry and positions the stream at the beginning of
+ * the entry data.
+ *
+ * @return the CPIOArchiveEntry just read
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ */
+ public CpioArchiveEntry getNextCPIOEntry() throws IOException {
+ ensureOpen();
+ if (this.entry != null) {
+ closeEntry();
+ }
+ readFully(TWO_BYTES_BUF, 0, TWO_BYTES_BUF.length);
+ if (CpioUtil.byteArray2long(TWO_BYTES_BUF, false) == MAGIC_OLD_BINARY) {
+ this.entry = readOldBinaryEntry(false);
+ } else if (CpioUtil.byteArray2long(TWO_BYTES_BUF, true)
+ == MAGIC_OLD_BINARY) {
+ this.entry = readOldBinaryEntry(true);
+ } else {
+ System.arraycopy(TWO_BYTES_BUF, 0, SIX_BYTES_BUF, 0,
+ TWO_BYTES_BUF.length);
+ readFully(SIX_BYTES_BUF, TWO_BYTES_BUF.length,
+ FOUR_BYTES_BUF.length);
+ String magicString = ArchiveUtils.toAsciiString(SIX_BYTES_BUF);
+ if (magicString.equals(MAGIC_NEW)) {
+ this.entry = readNewEntry(false);
+ } else if (magicString.equals(MAGIC_NEW_CRC)) {
+ this.entry = readNewEntry(true);
+ } else if (magicString.equals(MAGIC_OLD_ASCII)) {
+ this.entry = readOldAsciiEntry();
+ } else {
+ throw new IOException("Unknown magic [" + magicString + "]. Occured at byte: " + getBytesRead());
+ }
+ }
+
+ this.entryBytesRead = 0;
+ this.entryEOF = false;
+ this.crc = 0;
+
+ if (this.entry.getName().equals(CPIO_TRAILER)) {
+ this.entryEOF = true;
+ skipRemainderOfLastBlock();
+ return null;
+ }
+ return this.entry;
+ }
+
+ private void skip(int bytes) throws IOException{
+ // bytes cannot be more than 3 bytes
+ if (bytes > 0) {
+ readFully(FOUR_BYTES_BUF, 0, bytes);
+ }
+ }
+
+ /**
+ * Reads from the current CPIO entry into an array of bytes. Blocks until
+ * some input is available.
+ *
+ * @param b
+ * the buffer into which the data is read
+ * @param off
+ * the start offset of the data
+ * @param len
+ * the maximum number of bytes read
+ * @return the actual number of bytes read, or -1 if the end of the entry is
+ * reached
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ */
+ @Override
+ public int read(final byte[] b, final int off, final int len)
+ throws IOException {
+ ensureOpen();
+ if (off < 0 || len < 0 || off > b.length - len) {
+ throw new IndexOutOfBoundsException();
+ } else if (len == 0) {
+ return 0;
+ }
+
+ if (this.entry == null || this.entryEOF) {
+ return -1;
+ }
+ if (this.entryBytesRead == this.entry.getSize()) {
+ skip(entry.getDataPadCount());
+ this.entryEOF = true;
+ if (this.entry.getFormat() == FORMAT_NEW_CRC
+ && this.crc != this.entry.getChksum()) {
+ throw new IOException("CRC Error. Occured at byte: "
+ + getBytesRead());
+ }
+ return -1; // EOF for this entry
+ }
+ int tmplength = (int) Math.min(len, this.entry.getSize()
+ - this.entryBytesRead);
+ if (tmplength < 0) {
+ return -1;
+ }
+
+ int tmpread = readFully(b, off, tmplength);
+ if (this.entry.getFormat() == FORMAT_NEW_CRC) {
+ for (int pos = 0; pos < tmpread; pos++) {
+ this.crc += b[pos] & 0xFF;
+ }
+ }
+ this.entryBytesRead += tmpread;
+
+ return tmpread;
+ }
+
+ private final int readFully(final byte[] b, final int off, final int len)
+ throws IOException {
+ int count = IOUtils.readFully(in, b, off, len);
+ count(count);
+ if (count < len) {
+ throw new EOFException();
+ }
+ return count;
+ }
+
+ private long readBinaryLong(final int length, final boolean swapHalfWord)
+ throws IOException {
+ byte tmp[] = new byte[length];
+ readFully(tmp, 0, tmp.length);
+ return CpioUtil.byteArray2long(tmp, swapHalfWord);
+ }
+
+ private long readAsciiLong(final int length, final int radix)
+ throws IOException {
+ byte tmpBuffer[] = new byte[length];
+ readFully(tmpBuffer, 0, tmpBuffer.length);
+ return Long.parseLong(ArchiveUtils.toAsciiString(tmpBuffer), radix);
+ }
+
+ private CpioArchiveEntry readNewEntry(final boolean hasCrc)
+ throws IOException {
+ CpioArchiveEntry ret;
+ if (hasCrc) {
+ ret = new CpioArchiveEntry(FORMAT_NEW_CRC);
+ } else {
+ ret = new CpioArchiveEntry(FORMAT_NEW);
+ }
+
+ ret.setInode(readAsciiLong(8, 16));
+ long mode = readAsciiLong(8, 16);
+ if (CpioUtil.fileType(mode) != 0){ // mode is initialised to 0
+ ret.setMode(mode);
+ }
+ ret.setUID(readAsciiLong(8, 16));
+ ret.setGID(readAsciiLong(8, 16));
+ ret.setNumberOfLinks(readAsciiLong(8, 16));
+ ret.setTime(readAsciiLong(8, 16));
+ ret.setSize(readAsciiLong(8, 16));
+ ret.setDeviceMaj(readAsciiLong(8, 16));
+ ret.setDeviceMin(readAsciiLong(8, 16));
+ ret.setRemoteDeviceMaj(readAsciiLong(8, 16));
+ ret.setRemoteDeviceMin(readAsciiLong(8, 16));
+ long namesize = readAsciiLong(8, 16);
+ ret.setChksum(readAsciiLong(8, 16));
+ String name = readCString((int) namesize);
+ ret.setName(name);
+ if (CpioUtil.fileType(mode) == 0 && !name.equals(CPIO_TRAILER)){
+ throw new IOException("Mode 0 only allowed in the trailer. Found entry name: "+name + " Occured at byte: " + getBytesRead());
+ }
+ skip(ret.getHeaderPadCount());
+
+ return ret;
+ }
+
+ private CpioArchiveEntry readOldAsciiEntry() throws IOException {
+ CpioArchiveEntry ret = new CpioArchiveEntry(FORMAT_OLD_ASCII);
+
+ ret.setDevice(readAsciiLong(6, 8));
+ ret.setInode(readAsciiLong(6, 8));
+ final long mode = readAsciiLong(6, 8);
+ if (CpioUtil.fileType(mode) != 0) {
+ ret.setMode(mode);
+ }
+ ret.setUID(readAsciiLong(6, 8));
+ ret.setGID(readAsciiLong(6, 8));
+ ret.setNumberOfLinks(readAsciiLong(6, 8));
+ ret.setRemoteDevice(readAsciiLong(6, 8));
+ ret.setTime(readAsciiLong(11, 8));
+ long namesize = readAsciiLong(6, 8);
+ ret.setSize(readAsciiLong(11, 8));
+ final String name = readCString((int) namesize);
+ ret.setName(name);
+ if (CpioUtil.fileType(mode) == 0 && !name.equals(CPIO_TRAILER)){
+ throw new IOException("Mode 0 only allowed in the trailer. Found entry: "+ name + " Occured at byte: " + getBytesRead());
+ }
+
+ return ret;
+ }
+
+ private CpioArchiveEntry readOldBinaryEntry(final boolean swapHalfWord)
+ throws IOException {
+ CpioArchiveEntry ret = new CpioArchiveEntry(FORMAT_OLD_BINARY);
+
+ ret.setDevice(readBinaryLong(2, swapHalfWord));
+ ret.setInode(readBinaryLong(2, swapHalfWord));
+ final long mode = readBinaryLong(2, swapHalfWord);
+ if (CpioUtil.fileType(mode) != 0){
+ ret.setMode(mode);
+ }
+ ret.setUID(readBinaryLong(2, swapHalfWord));
+ ret.setGID(readBinaryLong(2, swapHalfWord));
+ ret.setNumberOfLinks(readBinaryLong(2, swapHalfWord));
+ ret.setRemoteDevice(readBinaryLong(2, swapHalfWord));
+ ret.setTime(readBinaryLong(4, swapHalfWord));
+ long namesize = readBinaryLong(2, swapHalfWord);
+ ret.setSize(readBinaryLong(4, swapHalfWord));
+ final String name = readCString((int) namesize);
+ ret.setName(name);
+ if (CpioUtil.fileType(mode) == 0 && !name.equals(CPIO_TRAILER)){
+ throw new IOException("Mode 0 only allowed in the trailer. Found entry: "+name + "Occured at byte: " + getBytesRead());
+ }
+ skip(ret.getHeaderPadCount());
+
+ return ret;
+ }
+
+ private String readCString(final int length) throws IOException {
+ // don't include trailing NUL in file name to decode
+ byte tmpBuffer[] = new byte[length - 1];
+ readFully(tmpBuffer, 0, tmpBuffer.length);
+ this.in.read(); // consume the trailing NUL
+ return encoding.decode(tmpBuffer);
+ }
+
+ /**
+ * Skips the specified number of bytes in the current CPIO entry.
+ *
+ * @param n
+ * the number of bytes to skip
+ * @return the actual number of bytes skipped
+ * @throws IOException
+ * if an I/O error has occurred
+ * @throws IllegalArgumentException
+ * if n < 0
+ */
+ @Override
+ public long skip(final long n) throws IOException {
+ if (n < 0) {
+ throw new IllegalArgumentException("negative skip length");
+ }
+ ensureOpen();
+ int max = (int) Math.min(n, Integer.MAX_VALUE);
+ int total = 0;
+
+ while (total < max) {
+ int len = max - total;
+ if (len > this.tmpbuf.length) {
+ len = this.tmpbuf.length;
+ }
+ len = read(this.tmpbuf, 0, len);
+ if (len == -1) {
+ this.entryEOF = true;
+ break;
+ }
+ total += len;
+ }
+ return total;
+ }
+
+ @Override
+ public ArchiveEntry getNextEntry() throws IOException {
+ return getNextCPIOEntry();
+ }
+
+ /**
+ * Skips the padding zeros written after the TRAILER!!! entry.
+ */
+ private void skipRemainderOfLastBlock() throws IOException {
+ long readFromLastBlock = getBytesRead() % blockSize;
+ long remainingBytes = readFromLastBlock == 0 ? 0
+ : blockSize - readFromLastBlock;
+ while (remainingBytes > 0) {
+ long skipped = skip(remainingBytes);
+ if (skipped <= 0) {
+ break;
+ }
+ remainingBytes -= skipped;
+ }
+ }
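+
+ // Worked example (editor's addition): with the default BLOCK_SIZE of 512,
+ // if 1300 bytes have been read when the trailer is seen, 1300 % 512 == 276,
+ // so the remaining 236 padding bytes of the final block are skipped.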
+
+ /**
+ * Checks if the signature matches one of the following magic values:
+ *
+ * Strings:
+ *
+ * "070701" - MAGIC_NEW
+ * "070702" - MAGIC_NEW_CRC
+ * "070707" - MAGIC_OLD_ASCII
+ *
+ * Octal Binary value:
+ *
+ * 070707 - MAGIC_OLD_BINARY (held as a short) = 0x71C7 or 0xC771
+ */
+ public static boolean matches(byte[] signature, int length) {
+ if (length < 6) {
+ return false;
+ }
+
+ // Check binary values
+ if (signature[0] == 0x71 && (signature[1] & 0xFF) == 0xc7) {
+ return true;
+ }
+ if (signature[1] == 0x71 && (signature[0] & 0xFF) == 0xc7) {
+ return true;
+ }
+
+ // Check Ascii (String) values
+ // 3037 3037 30nn
+ if (signature[0] != 0x30) {
+ return false;
+ }
+ if (signature[1] != 0x37) {
+ return false;
+ }
+ if (signature[2] != 0x30) {
+ return false;
+ }
+ if (signature[3] != 0x37) {
+ return false;
+ }
+ if (signature[4] != 0x30) {
+ return false;
+ }
+ // Check last byte
+ if (signature[5] == 0x31) {
+ return true;
+ }
+ if (signature[5] == 0x32) {
+ return true;
+ }
+ if (signature[5] == 0x37) {
+ return true;
+ }
+
+ return false;
+ }
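+
+ // Detection sketch (editor's addition): six buffered bytes are enough to
+ // recognize any cpio variant, e.g.
+ //
+ // byte[] sig = {'0', '7', '0', '7', '0', '1'};
+ // boolean isCpio = CpioArchiveInputStream.matches(sig, 6); // true (new format)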
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java
new file mode 100644
index 000000000..ff86ddf9f
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java
@@ -0,0 +1,558 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.cpio;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
+import org.apache.commons.compress.utils.ArchiveUtils;
+import org.apache.commons.compress.utils.CharsetNames;
+
+/**
+ * CPIOArchiveOutputStream is a stream for writing CPIO streams. All formats of
+ * CPIO are supported (old ASCII, old binary, new portable format and the new
+ * portable format with CRC).
+ *
+ *
+ * An entry can be written by creating an instance of CpioArchiveEntry,
+ * filling it with the necessary values and putting it into the CPIO stream.
+ * Afterwards write the contents of the file into the CPIO stream. Either
+ * close the stream by calling finish() or put the next entry into the cpio
+ * stream.
+ *
+ *
+ * CpioArchiveOutputStream out = new CpioArchiveOutputStream(
+ * new FileOutputStream(new File("test.cpio")));
+ * CpioArchiveEntry entry = new CpioArchiveEntry();
+ * entry.setName("testfile");
+ * String contents = "12345";
+ * entry.setSize(contents.length());
+ * entry.setMode(CpioConstants.C_ISREG); // regular file
+ * ... set other attributes, e.g. time, number of links
+ * out.putArchiveEntry(entry);
+ * out.write(contents.getBytes());
+ * out.close();
+ *
+ *
+ * Note: This implementation should be compatible with cpio 2.5
+ *
+ * This class uses mutable fields and is not considered threadsafe.
+ *
+ * based on code from the jRPM project (jrpm.sourceforge.net)
+ */
+public class CpioArchiveOutputStream extends ArchiveOutputStream implements
+ CpioConstants {
+
+ private CpioArchiveEntry entry;
+
+ private boolean closed = false;
+
+ /** indicates if this archive is finished */
+ private boolean finished;
+
+ /**
+ * See {@link CpioArchiveEntry#setFormat(short)} for possible values.
+ */
+ private final short entryFormat;
+
+ private final HashMap<String, CpioArchiveEntry> names =
+ new HashMap<String, CpioArchiveEntry>();
+
+ private long crc = 0;
+
+ private long written;
+
+ private final OutputStream out;
+
+ private final int blockSize;
+
+ private long nextArtificalDeviceAndInode = 1;
+
+ /**
+ * The encoding to use for filenames and labels.
+ */
+ private final ZipEncoding encoding;
+
+ /**
+ * Construct the cpio output stream with a specified format, a
+ * blocksize of {@link CpioConstants#BLOCK_SIZE BLOCK_SIZE} and
+ * using ASCII as the file name encoding.
+ *
+ * @param out
+ * The cpio stream
+ * @param format
+ * The format of the stream
+ */
+ public CpioArchiveOutputStream(final OutputStream out, final short format) {
+ this(out, format, BLOCK_SIZE, CharsetNames.US_ASCII);
+ }
+
+ /**
+ * Construct the cpio output stream with a specified format using
+ * ASCII as the file name encoding.
+ *
+ * @param out
+ * The cpio stream
+ * @param format
+ * The format of the stream
+ * @param blockSize
+ * The block size of the archive.
+ *
+ * @since 1.1
+ */
+ public CpioArchiveOutputStream(final OutputStream out, final short format,
+ final int blockSize) {
+ this(out, format, blockSize, CharsetNames.US_ASCII);
+ }
+
+ /**
+ * Construct the cpio output stream with a specified format, block size
+ * and file name encoding.
+ *
+ * @param out
+ * The cpio stream
+ * @param format
+ * The format of the stream
+ * @param blockSize
+ * The block size of the archive.
+ * @param encoding
+ * The encoding of file names to write - use null for
+ * the platform's default.
+ *
+ * @since 1.6
+ */
+ public CpioArchiveOutputStream(final OutputStream out, final short format,
+ final int blockSize, final String encoding) {
+ this.out = out;
+ switch (format) {
+ case FORMAT_NEW:
+ case FORMAT_NEW_CRC:
+ case FORMAT_OLD_ASCII:
+ case FORMAT_OLD_BINARY:
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown format: "+format);
+
+ }
+ this.entryFormat = format;
+ this.blockSize = blockSize;
+ this.encoding = ZipEncodingHelper.getZipEncoding(encoding);
+ }
+
+ /**
+ * Construct the cpio output stream. The format for this CPIO stream is the
+ * "new" format using ASCII encoding for file names
+ *
+ * @param out
+ * The cpio stream
+ */
+ public CpioArchiveOutputStream(final OutputStream out) {
+ this(out, FORMAT_NEW);
+ }
+
+ /**
+ * Construct the cpio output stream. The format for this CPIO stream is the
+ * "new" format.
+ *
+ * @param out
+ * The cpio stream
+ * @param encoding
+ * The encoding of file names to write - use null for
+ * the platform's default.
+ * @since 1.6
+ */
+ public CpioArchiveOutputStream(final OutputStream out, String encoding) {
+ this(out, FORMAT_NEW, BLOCK_SIZE, encoding);
+ }
+
+ /**
+ * Check to make sure that this stream has not been closed
+ *
+ * @throws IOException
+ * if the stream is already closed
+ */
+ private void ensureOpen() throws IOException {
+ if (this.closed) {
+ throw new IOException("Stream closed");
+ }
+ }
+
+ /**
+ * Begins writing a new CPIO file entry and positions the stream to the
+ * start of the entry data. Closes the current entry if still active. The
+ * current time will be used if the entry has no set modification time and
+ * the default header format will be used if no other format is specified in
+ * the entry.
+ *
+ * @param entry
+ * the CPIO cpioEntry to be written
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ * @throws ClassCastException if entry is not an instance of CpioArchiveEntry
+ */
+ @Override
+ public void putArchiveEntry(ArchiveEntry entry) throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+
+ CpioArchiveEntry e = (CpioArchiveEntry) entry;
+ ensureOpen();
+ if (this.entry != null) {
+ closeArchiveEntry(); // close previous entry
+ }
+ if (e.getTime() == -1) {
+ e.setTime(System.currentTimeMillis() / 1000);
+ }
+
+ final short format = e.getFormat();
+ if (format != this.entryFormat){
+ throw new IOException("Header format: "+format+" does not match existing format: "+this.entryFormat);
+ }
+
+ if (this.names.put(e.getName(), e) != null) {
+ throw new IOException("duplicate entry: " + e.getName());
+ }
+
+ writeHeader(e);
+ this.entry = e;
+ this.written = 0;
+ }
+
+ private void writeHeader(final CpioArchiveEntry e) throws IOException {
+ switch (e.getFormat()) {
+ case FORMAT_NEW:
+ out.write(ArchiveUtils.toAsciiBytes(MAGIC_NEW));
+ count(6);
+ writeNewEntry(e);
+ break;
+ case FORMAT_NEW_CRC:
+ out.write(ArchiveUtils.toAsciiBytes(MAGIC_NEW_CRC));
+ count(6);
+ writeNewEntry(e);
+ break;
+ case FORMAT_OLD_ASCII:
+ out.write(ArchiveUtils.toAsciiBytes(MAGIC_OLD_ASCII));
+ count(6);
+ writeOldAsciiEntry(e);
+ break;
+ case FORMAT_OLD_BINARY:
+ boolean swapHalfWord = true;
+ writeBinaryLong(MAGIC_OLD_BINARY, 2, swapHalfWord);
+ writeOldBinaryEntry(e, swapHalfWord);
+ break;
+ default:
+ throw new IOException("unknown format " + e.getFormat());
+ }
+ }
+
+ private void writeNewEntry(final CpioArchiveEntry entry) throws IOException {
+ long inode = entry.getInode();
+ long devMin = entry.getDeviceMin();
+ if (CPIO_TRAILER.equals(entry.getName())) {
+ inode = devMin = 0;
+ } else {
+ if (inode == 0 && devMin == 0) {
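+ // synthesize a unique (device, inode) pair from a 64-bit counter:
+ // the low 32 bits become the inode, the high 32 bits the device minor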
+ inode = nextArtificalDeviceAndInode & 0xFFFFFFFF;
+ devMin = (nextArtificalDeviceAndInode++ >> 32) & 0xFFFFFFFF;
+ } else {
+ nextArtificalDeviceAndInode =
+ Math.max(nextArtificalDeviceAndInode,
+ inode + 0x100000000L * devMin) + 1;
+ }
+ }
+
+ writeAsciiLong(inode, 8, 16);
+ writeAsciiLong(entry.getMode(), 8, 16);
+ writeAsciiLong(entry.getUID(), 8, 16);
+ writeAsciiLong(entry.getGID(), 8, 16);
+ writeAsciiLong(entry.getNumberOfLinks(), 8, 16);
+ writeAsciiLong(entry.getTime(), 8, 16);
+ writeAsciiLong(entry.getSize(), 8, 16);
+ writeAsciiLong(entry.getDeviceMaj(), 8, 16);
+ writeAsciiLong(devMin, 8, 16);
+ writeAsciiLong(entry.getRemoteDeviceMaj(), 8, 16);
+ writeAsciiLong(entry.getRemoteDeviceMin(), 8, 16);
+ writeAsciiLong(entry.getName().length() + 1, 8, 16);
+ writeAsciiLong(entry.getChksum(), 8, 16);
+ writeCString(entry.getName());
+ pad(entry.getHeaderPadCount());
+ }
+
+ private void writeOldAsciiEntry(final CpioArchiveEntry entry)
+ throws IOException {
+ long inode = entry.getInode();
+ long device = entry.getDevice();
+ if (CPIO_TRAILER.equals(entry.getName())) {
+ inode = device = 0;
+ } else {
+ if (inode == 0 && device == 0) {
+ inode = nextArtificalDeviceAndInode & 0777777;
+ device = (nextArtificalDeviceAndInode++ >> 18) & 0777777;
+ } else {
+ nextArtificalDeviceAndInode =
+ Math.max(nextArtificalDeviceAndInode,
+ inode + 01000000 * device) + 1;
+ }
+ }
+
+ writeAsciiLong(device, 6, 8);
+ writeAsciiLong(inode, 6, 8);
+ writeAsciiLong(entry.getMode(), 6, 8);
+ writeAsciiLong(entry.getUID(), 6, 8);
+ writeAsciiLong(entry.getGID(), 6, 8);
+ writeAsciiLong(entry.getNumberOfLinks(), 6, 8);
+ writeAsciiLong(entry.getRemoteDevice(), 6, 8);
+ writeAsciiLong(entry.getTime(), 11, 8);
+ writeAsciiLong(entry.getName().length() + 1, 6, 8);
+ writeAsciiLong(entry.getSize(), 11, 8);
+ writeCString(entry.getName());
+ }
+
+ private void writeOldBinaryEntry(final CpioArchiveEntry entry,
+ final boolean swapHalfWord) throws IOException {
+ long inode = entry.getInode();
+ long device = entry.getDevice();
+ if (CPIO_TRAILER.equals(entry.getName())) {
+ inode = device = 0;
+ } else {
+ if (inode == 0 && device == 0) {
+ inode = nextArtificalDeviceAndInode & 0xFFFF;
+ device = (nextArtificalDeviceAndInode++ >> 16) & 0xFFFF;
+ } else {
+ nextArtificalDeviceAndInode =
+ Math.max(nextArtificalDeviceAndInode,
+ inode + 0x10000 * device) + 1;
+ }
+ }
+
+ writeBinaryLong(device, 2, swapHalfWord);
+ writeBinaryLong(inode, 2, swapHalfWord);
+ writeBinaryLong(entry.getMode(), 2, swapHalfWord);
+ writeBinaryLong(entry.getUID(), 2, swapHalfWord);
+ writeBinaryLong(entry.getGID(), 2, swapHalfWord);
+ writeBinaryLong(entry.getNumberOfLinks(), 2, swapHalfWord);
+ writeBinaryLong(entry.getRemoteDevice(), 2, swapHalfWord);
+ writeBinaryLong(entry.getTime(), 4, swapHalfWord);
+ writeBinaryLong(entry.getName().length() + 1, 2, swapHalfWord);
+ writeBinaryLong(entry.getSize(), 4, swapHalfWord);
+ writeCString(entry.getName());
+ pad(entry.getHeaderPadCount());
+ }
+
+ /*(non-Javadoc)
+ *
+ * @see
+ * org.apache.commons.compress.archivers.ArchiveOutputStream#closeArchiveEntry
+ * ()
+ */
+ @Override
+ public void closeArchiveEntry() throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+
+ ensureOpen();
+
+ if (entry == null) {
+ throw new IOException("Trying to close non-existent entry");
+ }
+
+ if (this.entry.getSize() != this.written) {
+ throw new IOException("invalid entry size (expected "
+ + this.entry.getSize() + " but got " + this.written
+ + " bytes)");
+ }
+ pad(this.entry.getDataPadCount());
+ if (this.entry.getFormat() == FORMAT_NEW_CRC
+ && this.crc != this.entry.getChksum()) {
+ throw new IOException("CRC Error");
+ }
+ this.entry = null;
+ this.crc = 0;
+ this.written = 0;
+ }
+
+ /**
+ * Writes an array of bytes to the current CPIO entry data. This method will
+ * block until all the bytes are written.
+ *
+ * @param b
+ * the data to be written
+ * @param off
+ * the start offset in the data
+ * @param len
+ * the number of bytes that are written
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ */
+ @Override
+ public void write(final byte[] b, final int off, final int len)
+ throws IOException {
+ ensureOpen();
+ if (off < 0 || len < 0 || off > b.length - len) {
+ throw new IndexOutOfBoundsException();
+ } else if (len == 0) {
+ return;
+ }
+
+ if (this.entry == null) {
+ throw new IOException("no current CPIO entry");
+ }
+ if (this.written + len > this.entry.getSize()) {
+ throw new IOException("attempt to write past end of STORED entry");
+ }
+ out.write(b, off, len);
+ this.written += len;
+ if (this.entry.getFormat() == FORMAT_NEW_CRC) {
+ for (int pos = 0; pos < len; pos++) {
+ this.crc += b[pos] & 0xFF;
+ }
+ }
+ count(len);
+ }
+
+ /**
+ * Finishes writing the contents of the CPIO output stream without closing
+ * the underlying stream. Use this method when applying multiple filters in
+ * succession to the same output stream.
+ *
+ * @throws IOException
+ * if an I/O exception has occurred or if a CPIO file error has
+ * occurred
+ */
+ @Override
+ public void finish() throws IOException {
+ ensureOpen();
+ if (finished) {
+ throw new IOException("This archive has already been finished");
+ }
+
+ if (this.entry != null) {
+ throw new IOException("This archive contains unclosed entries.");
+ }
+ this.entry = new CpioArchiveEntry(this.entryFormat);
+ this.entry.setName(CPIO_TRAILER);
+ this.entry.setNumberOfLinks(1);
+ writeHeader(this.entry);
+ closeArchiveEntry();
+
+ int lengthOfLastBlock = (int) (getBytesWritten() % blockSize);
+ if (lengthOfLastBlock != 0) {
+ pad(blockSize - lengthOfLastBlock);
+ }
+
+ finished = true;
+ }
+
+ /**
+ * Closes the CPIO output stream as well as the stream being filtered.
+ *
+ * @throws IOException
+ * if an I/O error has occurred or if a CPIO file error has
+ * occurred
+ */
+ @Override
+ public void close() throws IOException {
+ if(!finished) {
+ finish();
+ }
+
+ if (!this.closed) {
+ out.close();
+ this.closed = true;
+ }
+ }
+
+ private void pad(int count) throws IOException{
+ if (count > 0){
+ byte buff[] = new byte[count];
+ out.write(buff);
+ count(count);
+ }
+ }
+
+ private void writeBinaryLong(final long number, final int length,
+ final boolean swapHalfWord) throws IOException {
+ byte tmp[] = CpioUtil.long2byteArray(number, length, swapHalfWord);
+ out.write(tmp);
+ count(tmp.length);
+ }
+
+ private void writeAsciiLong(final long number, final int length,
+ final int radix) throws IOException {
+ StringBuilder tmp = new StringBuilder();
+ String tmpStr;
+ if (radix == 16) {
+ tmp.append(Long.toHexString(number));
+ } else if (radix == 8) {
+ tmp.append(Long.toOctalString(number));
+ } else {
+ tmp.append(Long.toString(number));
+ }
+
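+ // fixed-width field: left-pad with zeros, or keep only the
+ // low-order digits when the value does not fit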
+ if (tmp.length() <= length) {
+ long insertLength = length - tmp.length();
+ for (int pos = 0; pos < insertLength; pos++) {
+ tmp.insert(0, "0");
+ }
+ tmpStr = tmp.toString();
+ } else {
+ tmpStr = tmp.substring(tmp.length() - length);
+ }
+ byte[] b = ArchiveUtils.toAsciiBytes(tmpStr);
+ out.write(b);
+ count(b.length);
+ }
+
+ /**
+ * Writes an ASCII string to the stream followed by \0
+ * @param str the String to write
+ * @throws IOException if the string couldn't be written
+ */
+ private void writeCString(final String str) throws IOException {
+ ByteBuffer buf = encoding.encode(str);
+ final int len = buf.limit() - buf.position();
+ out.write(buf.array(), buf.arrayOffset(), len);
+ out.write('\0');
+ count(len + 1);
+ }
+
+ /**
+ * Creates a new ArchiveEntry. The entryName must be an ASCII encoded string.
+ *
+ * @see org.apache.commons.compress.archivers.ArchiveOutputStream#createArchiveEntry(java.io.File, java.lang.String)
+ */
+ @Override
+ public ArchiveEntry createArchiveEntry(File inputFile, String entryName)
+ throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ return new CpioArchiveEntry(inputFile, entryName);
+ }
+
+}
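The usage example from the class javadoc, written out as a compilable sketch.
The output path and contents are illustrative; closeArchiveEntry() is included
because the stream verifies that the declared entry size was fully written:

    import java.io.FileOutputStream;
    import java.io.IOException;
    import org.apache.commons.compress.archivers.cpio.CpioArchiveEntry;
    import org.apache.commons.compress.archivers.cpio.CpioArchiveOutputStream;
    import org.apache.commons.compress.archivers.cpio.CpioConstants;

    public class CpioWriteExample {
        public static void main(String[] args) throws IOException {
            CpioArchiveOutputStream out = new CpioArchiveOutputStream(
                    new FileOutputStream("test.cpio")); // illustrative path
            byte[] contents = "12345".getBytes("US-ASCII");
            CpioArchiveEntry entry = new CpioArchiveEntry("testfile");
            entry.setSize(contents.length);
            entry.setMode(CpioConstants.C_ISREG | 0644); // regular file, rw-r--r--
            out.putArchiveEntry(entry);
            out.write(contents);
            out.closeArchiveEntry(); // checks the declared size was written
            out.close();             // runs finish() and writes the trailer
        }
    }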
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioConstants.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioConstants.java
new file mode 100644
index 000000000..b480d79c1
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioConstants.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.cpio;
+
+/**
+ * All constants needed by CPIO.
+ *
+ * based on code from the jRPM project (jrpm.sourceforge.net)
+ *
+ */
+public interface CpioConstants {
+ /** magic number of a cpio entry in the new format */
+ final String MAGIC_NEW = "070701";
+
+ /** magic number of a cpio entry in the new format with crc */
+ final String MAGIC_NEW_CRC = "070702";
+
+ /** magic number of a cpio entry in the old ascii format */
+ final String MAGIC_OLD_ASCII = "070707";
+
+ /** magic number of a cpio entry in the old binary format */
+ final int MAGIC_OLD_BINARY = 070707;
+
+ // These FORMAT_ constants are internal to the code
+
+ /** write/read a CPIOArchiveEntry in the new format */
+ final short FORMAT_NEW = 1;
+
+ /** write/read a CPIOArchiveEntry in the new format with crc */
+ final short FORMAT_NEW_CRC = 2;
+
+ /** write/read a CPIOArchiveEntry in the old ascii format */
+ final short FORMAT_OLD_ASCII = 4;
+
+ /** write/read a CPIOArchiveEntry in the old binary format */
+ final short FORMAT_OLD_BINARY = 8;
+
+ /** Mask for both new formats */
+ final short FORMAT_NEW_MASK = 3;
+
+ /** Mask for both old formats */
+ final short FORMAT_OLD_MASK = 12;
+
+ /*
+ * Constants for the MODE bits
+ */
+
+ /** Mask for all file type bits. */
+ final int S_IFMT = 0170000;
+
+ // http://www.opengroup.org/onlinepubs/9699919799/basedefs/cpio.h.html
+ // has a list of the C_xxx constants
+
+ /** Defines a socket */
+ final int C_ISSOCK = 0140000;
+
+ /** Defines a symbolic link */
+ final int C_ISLNK = 0120000;
+
+ /** HP/UX network special (C_ISCTG) */
+ final int C_ISNWK = 0110000;
+
+ /** Defines a regular file */
+ final int C_ISREG = 0100000;
+
+ /** Defines a block device */
+ final int C_ISBLK = 0060000;
+
+ /** Defines a directory */
+ final int C_ISDIR = 0040000;
+
+ /** Defines a character device */
+ final int C_ISCHR = 0020000;
+
+ /** Defines a pipe */
+ final int C_ISFIFO = 0010000;
+
+
+ /** Set user ID */
+ final int C_ISUID = 0004000;
+
+ /** Set group ID */
+ final int C_ISGID = 0002000;
+
+ /** On directories, restricted deletion flag. */
+ final int C_ISVTX = 0001000;
+
+
+ /** Permits the owner of a file to read the file */
+ final int C_IRUSR = 0000400;
+
+ /** Permits the owner of a file to write to the file */
+ final int C_IWUSR = 0000200;
+
+ /** Permits the owner of a file to execute the file or to search the directory */
+ final int C_IXUSR = 0000100;
+
+
+ /** Permits a file's group to read the file */
+ final int C_IRGRP = 0000040;
+
+ /** Permits a file's group to write to the file */
+ final int C_IWGRP = 0000020;
+
+ /** Permits a file's group to execute the file or to search the directory */
+ final int C_IXGRP = 0000010;
+
+
+ /** Permits others to read the file */
+ final int C_IROTH = 0000004;
+
+ /** Permits others to write to the file */
+ final int C_IWOTH = 0000002;
+
+ /** Permits others to execute the file or to search the directory */
+ final int C_IXOTH = 0000001;
+
+ /** The special trailer marker */
+ final String CPIO_TRAILER = "TRAILER!!!";
+
+ /**
+ * The default block size.
+ *
+ * @since 1.1
+ */
+ final int BLOCK_SIZE = 512;
+}
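The type and permission masks above combine into a single mode word; a short
sketch of composing and testing one:

    // Build a mode word for a regular file with rw-r--r-- permissions.
    int mode = CpioConstants.C_ISREG
             | CpioConstants.C_IRUSR | CpioConstants.C_IWUSR
             | CpioConstants.C_IRGRP | CpioConstants.C_IROTH;
    // S_IFMT masks off the permission bits, leaving only the file type.
    boolean regular = (mode & CpioConstants.S_IFMT) == CpioConstants.C_ISREG;
    // mode == 0100644 (octal), regular == true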
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioUtil.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioUtil.java
new file mode 100644
index 000000000..26b51fc48
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/CpioUtil.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.cpio;
+
+/**
+ * Package private utility class for Cpio
+ *
+ * @Immutable
+ */
+class CpioUtil {
+
+ /**
+ * Extracts the file type bits from a mode.
+ */
+ static long fileType(long mode) {
+ return mode & CpioConstants.S_IFMT;
+ }
+
+ /**
+ * Converts a byte array to a long. Halfwords can be swapped by setting
+ * swapHalfWord=true.
+ *
+ * @param number
+ * An array of bytes containing a number
+ * @param swapHalfWord
+ * Swap halfwords ([0][1][2][3]->[1][0][3][2])
+ * @return The long value
+ * @throws UnsupportedOperationException if number length is not a multiple of 2
+ */
+ static long byteArray2long(final byte[] number, final boolean swapHalfWord) {
+ if (number.length % 2 != 0) {
+ throw new UnsupportedOperationException();
+ }
+
+ long ret = 0;
+ int pos = 0;
+ byte tmp_number[] = new byte[number.length];
+ System.arraycopy(number, 0, tmp_number, 0, number.length);
+
+ if (!swapHalfWord) {
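+ // swap each pair of adjacent bytes, then assemble the
+ // big-endian value below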
+ byte tmp = 0;
+ for (pos = 0; pos < tmp_number.length; pos++) {
+ tmp = tmp_number[pos];
+ tmp_number[pos++] = tmp_number[pos];
+ tmp_number[pos] = tmp;
+ }
+ }
+
+ ret = tmp_number[0] & 0xFF;
+ for (pos = 1; pos < tmp_number.length; pos++) {
+ ret <<= 8;
+ ret |= tmp_number[pos] & 0xFF;
+ }
+ return ret;
+ }
+
+ /**
+ * Converts a long number to a byte array
+ * Halfwords can be swapped by setting swapHalfWord=true.
+ *
+ * @param number
+ * the input long number to be converted
+ *
+ * @param length
+ * The length of the returned array
+ * @param swapHalfWord
+ * Swap halfwords ([0][1][2][3]->[1][0][3][2])
+ * @return The long value
+ * @throws UnsupportedOperationException if the length is not a positive multiple of two
+ */
+ static byte[] long2byteArray(final long number, final int length,
+ final boolean swapHalfWord) {
+ byte[] ret = new byte[length];
+ int pos = 0;
+ long tmp_number = 0;
+
+ if (length % 2 != 0 || length < 2) {
+ throw new UnsupportedOperationException();
+ }
+
+ tmp_number = number;
+ for (pos = length - 1; pos >= 0; pos--) {
+ ret[pos] = (byte) (tmp_number & 0xFF);
+ tmp_number >>= 8;
+ }
+
+ if (!swapHalfWord) {
+ byte tmp = 0;
+ for (pos = 0; pos < length; pos++) {
+ tmp = ret[pos];
+ ret[pos++] = ret[pos];
+ ret[pos] = tmp;
+ }
+ }
+
+ return ret;
+ }
+}
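CpioUtil is package-private, so the round-trip sketch below assumes it runs
inside org.apache.commons.compress.archivers.cpio. Note the inverted sense of
the flag: swapHalfWord == true means no swap is applied:

    long value = 070707; // the old binary magic, 0x71C7
    byte[] noSwap = CpioUtil.long2byteArray(value, 2, true);   // {0x71, 0xC7}
    byte[] swapped = CpioUtil.long2byteArray(value, 2, false); // {0xC7, 0x71}
    // decoding with the matching flag restores the original value
    assert CpioUtil.byteArray2long(noSwap, true) == value;
    assert CpioUtil.byteArray2long(swapped, false) == value;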
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/package.html
new file mode 100644
index 000000000..985828725
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/cpio/package.html
@@ -0,0 +1,24 @@
+<html>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+<body>
+ Provides stream classes for reading and writing archives using
+ the CPIO format.
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/Dirent.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/Dirent.java
new file mode 100644
index 000000000..34e0ef791
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/Dirent.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+/**
+ * Directory entry.
+ */
+class Dirent {
+ private final int ino;
+ private final int parentIno;
+ private final int type;
+ private final String name;
+
+ /**
+ * Constructor
+ *
+ * @param ino
+ * @param parentIno
+ * @param type
+ * @param name
+ */
+ Dirent(int ino, int parentIno, int type, String name) {
+ this.ino = ino;
+ this.parentIno = parentIno;
+ this.type = type;
+ this.name = name;
+ }
+
+ /**
+ * Get ino.
+ * @return the i-node
+ */
+ int getIno() {
+ return ino;
+ }
+
+ /**
+ * Get ino of parent directory.
+ * @return the parent i-node
+ */
+ int getParentIno() {
+ return parentIno;
+ }
+
+ /**
+ * Get entry type.
+ * @return the entry type
+ */
+ int getType() {
+ return type;
+ }
+
+ /**
+ * Get name of directory entry.
+ * @return the directory name
+ */
+ String getName() {
+ return name;
+ }
+
+ /**
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return String.format("[%d]: %s", Integer.valueOf(ino), name);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
new file mode 100644
index 000000000..87ca8d913
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+/**
+ * Various constants associated with dump archives.
+ */
+public final class DumpArchiveConstants {
+ public static final int TP_SIZE = 1024;
+ public static final int NTREC = 10;
+ public static final int HIGH_DENSITY_NTREC = 32;
+ public static final int OFS_MAGIC = 60011;
+ public static final int NFS_MAGIC = 60012;
+ public static final int FS_UFS2_MAGIC = 0x19540119;
+ public static final int CHECKSUM = 84446;
+ public static final int LBLSIZE = 16;
+ public static final int NAMELEN = 64;
+
+ /* do not instantiate */
+ private DumpArchiveConstants() {
+ }
+
+ /**
+ * The type of tape segment.
+ */
+ public enum SEGMENT_TYPE {
+ TAPE(1),
+ INODE(2),
+ BITS(3),
+ ADDR(4),
+ END(5),
+ CLRI(6);
+
+ int code;
+
+ private SEGMENT_TYPE(int code) {
+ this.code = code;
+ }
+
+ public static SEGMENT_TYPE find(int code) {
+ for (SEGMENT_TYPE t : values()) {
+ if (t.code == code) {
+ return t;
+ }
+ }
+
+ return null;
+ }
+ }
+
+ /**
+ * The type of compression.
+ */
+ public enum COMPRESSION_TYPE {
+ ZLIB(0),
+ BZLIB(1),
+ LZO(2);
+
+ int code;
+
+ private COMPRESSION_TYPE(int code) {
+ this.code = code;
+ }
+
+ public static COMPRESSION_TYPE find(int code) {
+ for (COMPRESSION_TYPE t : values()) {
+ if (t.code == code) {
+ return t;
+ }
+ }
+
+ return null;
+ }
+ }
+}
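A quick sketch of the lookup helpers above; both find() methods return null
for codes they do not recognize, so callers must guard against that:

    DumpArchiveConstants.SEGMENT_TYPE seg =
            DumpArchiveConstants.SEGMENT_TYPE.find(5);     // SEGMENT_TYPE.END
    DumpArchiveConstants.COMPRESSION_TYPE comp =
            DumpArchiveConstants.COMPRESSION_TYPE.find(9); // null: unknown code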
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
new file mode 100644
index 000000000..1cb62c75d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
@@ -0,0 +1,809 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+import java.util.Collections;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+/**
+ * This class represents an entry in a Dump archive. It consists
+ * of the entry's header, the entry's File and any extended attributes.
+ *
+ * DumpEntries that are created from the header bytes read from
+ * an archive are instantiated with the DumpArchiveEntry( byte[] )
+ * constructor. These entries will be used when extracting from
+ * or listing the contents of an archive. These entries have their
+ * header filled in using the header bytes. They also set the File
+ * to null, since they reference an archive entry not a file.
+ *
+ * DumpEntries can also be constructed from nothing but a name.
+ * This allows the programmer to construct the entry by hand, for
+ * instance when only an InputStream is available for writing to
+ * the archive, and the header information is constructed from
+ * other information. In this case the header fields are set to
+ * defaults and the File is set to null.
+ *
+ *
+ * The C structure for a Dump Entry's header is:
+ *
+ * #define TP_BSIZE 1024 // size of each file block
+ * #define NTREC 10 // number of blocks to write at once
+ * #define HIGHDENSITYTREC 32 // number of blocks to write on high-density tapes
+ * #define TP_NINDIR (TP_BSIZE/2) // number of indirect inodes in record
+ * #define TP_NINOS (TP_NINDIR / sizeof (int32_t))
+ * #define LBLSIZE 16
+ * #define NAMELEN 64
+ *
+ * #define OFS_MAGIC (int)60011 // old format magic value
+ * #define NFS_MAGIC (int)60012 // new format magic value
+ * #define FS_UFS2_MAGIC (int)0x19540119
+ * #define CHECKSUM (int)84446 // constant used in checksum algorithm
+ *
+ * struct s_spcl {
+ * int32_t c_type; // record type (see below)
+ * int32_t c_date; // date of this dump
+ * int32_t c_ddate; // date of previous dump
+ * int32_t c_volume; // dump volume number
+ * u_int32_t c_tapea; // logical block of this record
+ * dump_ino_t c_ino; // number of inode
+ * int32_t c_magic; // magic number (see above)
+ * int32_t c_checksum; // record checksum
+ * #ifdef __linux__
+ * struct new_bsd_inode c_dinode;
+ * #else
+ * #ifdef sunos
+ * struct new_bsd_inode c_dinode;
+ * #else
+ * struct dinode c_dinode; // ownership and mode of inode
+ * #endif
+ * #endif
+ * int32_t c_count; // number of valid c_addr entries
+ * union u_data c_data; // see above
+ * char c_label[LBLSIZE]; // dump label
+ * int32_t c_level; // level of this dump
+ * char c_filesys[NAMELEN]; // name of dumped file system
+ * char c_dev[NAMELEN]; // name of dumped device
+ * char c_host[NAMELEN]; // name of dumped host
+ * int32_t c_flags; // additional information (see below)
+ * int32_t c_firstrec; // first record on volume
+ * int32_t c_ntrec; // blocksize on volume
+ * int32_t c_extattributes; // additional inode info (see below)
+ * int32_t c_spare[30]; // reserved for future uses
+ * } s_spcl;
+ *
+ * //
+ * // flag values
+ * //
+ * #define DR_NEWHEADER 0x0001 // new format tape header
+ * #define DR_NEWINODEFMT 0x0002 // new format inodes on tape
+ * #define DR_COMPRESSED 0x0080 // dump tape is compressed
+ * #define DR_METAONLY 0x0100 // only the metadata of the inode has been dumped
+ * #define DR_INODEINFO 0x0002 // [SIC] TS_END header contains c_inos information
+ * #define DR_EXTATTRIBUTES 0x8000
+ *
+ * //
+ * // extattributes inode info
+ * //
+ * #define EXT_REGULAR 0
+ * #define EXT_MACOSFNDRINFO 1
+ * #define EXT_MACOSRESFORK 2
+ * #define EXT_XATTR 3
+ *
+ * // used for EA on tape
+ * #define EXT2_GOOD_OLD_INODE_SIZE 128
+ * #define EXT2_XATTR_MAGIC 0xEA020000 // block EA
+ * #define EXT2_XATTR_MAGIC2 0xEA020001 // in inode EA
+ *
+ *
+ * The fields in bold are the same for all blocks. (This permitted
+ * multiple dumps to be written to a single tape.)
+ *
+ *
+ *
+ * The C structure for the inode (file) information is:
+ *
+ * struct bsdtimeval { // **** alpha-*-linux is deviant
+ * __u32 tv_sec;
+ * __u32 tv_usec;
+ * };
+ *
+ * #define NDADDR 12
+ * #define NIADDR 3
+ *
+ * //
+ * // This is the new (4.4) BSD inode structure
+ * // copied from the FreeBSD 2.0 <ufs/ufs/dinode.h> include file
+ * //
+ * struct new_bsd_inode {
+ * __u16 di_mode; // file type, standard Unix permissions
+ * __s16 di_nlink; // number of hard links to file.
+ * union {
+ * __u16 oldids[2];
+ * __u32 inumber;
+ * } di_u;
+ * u_quad_t di_size; // file size
+ * struct bsdtimeval di_atime; // time file was last accessed
+ * struct bsdtimeval di_mtime; // time file was last modified
+ * struct bsdtimeval di_ctime; // time file was created
+ * __u32 di_db[NDADDR];
+ * __u32 di_ib[NIADDR];
+ * __u32 di_flags; //
+ * __s32 di_blocks; // number of disk blocks
+ * __s32 di_gen; // generation number
+ * __u32 di_uid; // user id (see /etc/passwd)
+ * __u32 di_gid; // group id (see /etc/group)
+ * __s32 di_spare[2]; // unused
+ * };
+ *
+ *
+ * It is important to note that the header DOES NOT have the name of the
+ * file. It can't since hard links mean that you may have multiple filenames
+ * for a single physical file. You must read the contents of the directory
+ * entries to learn the mapping(s) from filename to inode.
+ *
+ *
+ *
+ * The C structure that indicates if a specific block is a real block
+ * that contains data or is a sparse block that is not persisted to the
+ * disk is:
+ *
+ * #define TP_BSIZE 1024
+ * #define TP_NINDIR (TP_BSIZE/2)
+ *
+ * union u_data {
+ * char s_addrs[TP_NINDIR]; // 1 => data; 0 => hole in inode
+ * int32_t s_inos[TP_NINOS]; // table of first inode on each volume
+ * } u_data;
+ *
+ *
+ * @NotThreadSafe
+ */
+public class DumpArchiveEntry implements ArchiveEntry {
+ private String name;
+ private TYPE type = TYPE.UNKNOWN;
+ private int mode;
+ private Set<PERMISSION> permissions = Collections.emptySet();
+ private long size;
+ private long atime;
+ private long mtime;
+ private int uid;
+ private int gid;
+
+ /**
+ * Currently unused
+ */
+ private final DumpArchiveSummary summary = null;
+
+ // this information is available from standard index.
+ private final TapeSegmentHeader header = new TapeSegmentHeader();
+ private String simpleName;
+ private String originalName;
+
+ // this information is available from QFA index
+ private int volume;
+ private long offset;
+ private int ino;
+ private int nlink;
+ private long ctime;
+ private int generation;
+ private boolean isDeleted;
+
+ /**
+ * Default constructor.
+ */
+ public DumpArchiveEntry() {
+ }
+
+ /**
+ * Constructor taking only filename.
+ * @param name pathname
+ * @param simpleName actual filename.
+ */
+ public DumpArchiveEntry(String name, String simpleName) {
+ setName(name);
+ this.simpleName = simpleName;
+ }
+
+ /**
+ * Constructor taking name, inode and type.
+ *
+ * @param name
+ * @param simpleName
+ * @param ino
+ * @param type
+ */
+ protected DumpArchiveEntry(String name, String simpleName, int ino,
+ TYPE type) {
+ setType(type);
+ setName(name);
+ this.simpleName = simpleName;
+ this.ino = ino;
+ this.offset = 0;
+ }
+
+ /**
+ * Returns the simple name of the entry (the file name without its path).
+ * @return the simple name of the entry.
+ */
+ public String getSimpleName() {
+ return simpleName;
+ }
+
+ /**
+ * Sets the simple name of the entry.
+ */
+ protected void setSimpleName(String simpleName) {
+ this.simpleName = simpleName;
+ }
+
+ /**
+ * Returns the ino of the entry.
+ */
+ public int getIno() {
+ return header.getIno();
+ }
+
+ /**
+ * Return the number of hard links to the entry.
+ */
+ public int getNlink() {
+ return nlink;
+ }
+
+ /**
+ * Set the number of hard links.
+ */
+ public void setNlink(int nlink) {
+ this.nlink = nlink;
+ }
+
+ /**
+ * Get file creation time.
+ */
+ public Date getCreationTime() {
+ return new Date(ctime);
+ }
+
+ /**
+ * Set the file creation time.
+ */
+ public void setCreationTime(Date ctime) {
+ this.ctime = ctime.getTime();
+ }
+
+ /**
+ * Return the generation of the file.
+ */
+ public int getGeneration() {
+ return generation;
+ }
+
+ /**
+ * Set the generation of the file.
+ */
+ public void setGeneration(int generation) {
+ this.generation = generation;
+ }
+
+ /**
+ * Has this file been deleted? (Only valid on incremental dumps.)
+ */
+ public boolean isDeleted() {
+ return isDeleted;
+ }
+
+ /**
+ * Set whether this file has been deleted.
+ */
+ public void setDeleted(boolean isDeleted) {
+ this.isDeleted = isDeleted;
+ }
+
+ /**
+ * Return the offset within the archive
+ */
+ public long getOffset() {
+ return offset;
+ }
+
+ /**
+ * Set the offset within the archive.
+ */
+ public void setOffset(long offset) {
+ this.offset = offset;
+ }
+
+ /**
+ * Return the tape volume where this file is located.
+ */
+ public int getVolume() {
+ return volume;
+ }
+
+ /**
+ * Set the tape volume.
+ */
+ public void setVolume(int volume) {
+ this.volume = volume;
+ }
+
+ /**
+ * Return the type of the tape segment header.
+ */
+ public DumpArchiveConstants.SEGMENT_TYPE getHeaderType() {
+ return header.getType();
+ }
+
+ /**
+ * Return the number of records in this segment.
+ */
+ public int getHeaderCount() {
+ return header.getCount();
+ }
+
+ /**
+ * Return the number of sparse records in this segment.
+ */
+ public int getHeaderHoles() {
+ return header.getHoles();
+ }
+
+ /**
+ * Is this a sparse record?
+ */
+ public boolean isSparseRecord(int idx) {
+ return (header.getCdata(idx) & 0x01) == 0;
+ }
+
+ /**
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ return ino;
+ }
+
+ /**
+ * @see java.lang.Object#equals(Object o)
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ } else if (o == null || !o.getClass().equals(getClass())) {
+ return false;
+ }
+
+ DumpArchiveEntry rhs = (DumpArchiveEntry) o;
+
+ if ((header == null) || (rhs.header == null)) {
+ return false;
+ }
+
+ if (ino != rhs.ino) {
+ return false;
+ }
+
+ if ((summary == null && rhs.summary != null)
+ || (summary != null && !summary.equals(rhs.summary))) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return getName();
+ }
+
+ /**
+ * Populate the dump archive entry and tape segment header with
+ * the contents of the buffer.
+ *
+ * @param buffer
+ * @throws Exception
+ */
+ static DumpArchiveEntry parse(byte[] buffer) {
+ DumpArchiveEntry entry = new DumpArchiveEntry();
+ TapeSegmentHeader header = entry.header;
+
+ header.type = DumpArchiveConstants.SEGMENT_TYPE.find(DumpArchiveUtil.convert32(
+ buffer, 0));
+
+ //header.dumpDate = new Date(1000L * DumpArchiveUtil.convert32(buffer, 4));
+ //header.previousDumpDate = new Date(1000L * DumpArchiveUtil.convert32(
+ // buffer, 8));
+ header.volume = DumpArchiveUtil.convert32(buffer, 12);
+ //header.tapea = DumpArchiveUtil.convert32(buffer, 16);
+ entry.ino = header.ino = DumpArchiveUtil.convert32(buffer, 20);
+
+ //header.magic = DumpArchiveUtil.convert32(buffer, 24);
+ //header.checksum = DumpArchiveUtil.convert32(buffer, 28);
+ int m = DumpArchiveUtil.convert16(buffer, 32);
+
+ // determine the type of the file.
+ entry.setType(TYPE.find((m >> 12) & 0x0F));
+
+ // determine the standard permissions
+ entry.setMode(m);
+
+ entry.nlink = DumpArchiveUtil.convert16(buffer, 34);
+ // inumber, oldids?
+ entry.setSize(DumpArchiveUtil.convert64(buffer, 40));
+
+ long t = (1000L * DumpArchiveUtil.convert32(buffer, 48)) +
+ (DumpArchiveUtil.convert32(buffer, 52) / 1000);
+ entry.setAccessTime(new Date(t));
+ t = (1000L * DumpArchiveUtil.convert32(buffer, 56)) +
+ (DumpArchiveUtil.convert32(buffer, 60) / 1000);
+ entry.setLastModifiedDate(new Date(t));
+ t = (1000L * DumpArchiveUtil.convert32(buffer, 64)) +
+ (DumpArchiveUtil.convert32(buffer, 68) / 1000);
+ entry.ctime = t;
+
+ // db: 72-119 - direct blocks
+ // id: 120-131 - indirect blocks
+ //entry.flags = DumpArchiveUtil.convert32(buffer, 132);
+ //entry.blocks = DumpArchiveUtil.convert32(buffer, 136);
+ entry.generation = DumpArchiveUtil.convert32(buffer, 140);
+ entry.setUserId(DumpArchiveUtil.convert32(buffer, 144));
+ entry.setGroupId(DumpArchiveUtil.convert32(buffer, 148));
+ // two 32-bit spare values.
+ header.count = DumpArchiveUtil.convert32(buffer, 160);
+
+ header.holes = 0;
+
+ for (int i = 0; (i < 512) && (i < header.count); i++) {
+ if (buffer[164 + i] == 0) {
+ header.holes++;
+ }
+ }
+
+ System.arraycopy(buffer, 164, header.cdata, 0, 512);
+
+ entry.volume = header.getVolume();
+
+ //entry.isSummaryOnly = false;
+ return entry;
+ }
+
+ /**
+ * Update entry with information from next tape segment header.
+ */
+ void update(byte[] buffer) {
+ header.volume = DumpArchiveUtil.convert32(buffer, 16);
+ header.count = DumpArchiveUtil.convert32(buffer, 160);
+
+ header.holes = 0;
+
+ for (int i = 0; (i < 512) && (i < header.count); i++) {
+ if (buffer[164 + i] == 0) {
+ header.holes++;
+ }
+ }
+
+ System.arraycopy(buffer, 164, header.cdata, 0, 512);
+ }
+
+ /**
+ * Archive entry as stored on tape. There is one TSH for (at most)
+ * every 512k in the file.
+ */
+ static class TapeSegmentHeader {
+ private DumpArchiveConstants.SEGMENT_TYPE type;
+ private int volume;
+ private int ino;
+ private int count;
+ private int holes;
+ private final byte[] cdata = new byte[512]; // map of any 'holes'
+
+ public DumpArchiveConstants.SEGMENT_TYPE getType() {
+ return type;
+ }
+
+ public int getVolume() {
+ return volume;
+ }
+
+ public int getIno() {
+ return ino;
+ }
+
+ void setIno(int ino) {
+ this.ino = ino;
+ }
+
+ public int getCount() {
+ return count;
+ }
+
+ public int getHoles() {
+ return holes;
+ }
+
+ public int getCdata(int idx) {
+ return cdata[idx];
+ }
+ }
+
+ /**
+ * Returns the name of the entry.
+ * @return the name of the entry.
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Returns the unmodified name of the entry.
+ * @return the name of the entry.
+ */
+ String getOriginalName() {
+ return originalName;
+ }
+
+ /**
+ * Sets the name of the entry.
+ */
+ public final void setName(String name) {
+ this.originalName = name;
+ if (name != null) {
+ if (isDirectory() && !name.endsWith("/")) {
+ name += "/";
+ }
+ if (name.startsWith("./")) {
+ name = name.substring(2);
+ }
+ }
+ this.name = name;
+ }
+
+ public Date getLastModifiedDate() {
+ return new Date(mtime);
+ }
+
+ /**
+ * Is this a directory?
+ */
+ public boolean isDirectory() {
+ return type == TYPE.DIRECTORY;
+ }
+
+ /**
+ * Is this a regular file?
+ */
+ public boolean isFile() {
+ return type == TYPE.FILE;
+ }
+
+ /**
+ * Is this a network device?
+ */
+ public boolean isSocket() {
+ return type == TYPE.SOCKET;
+ }
+
+ /**
+ * Is this a character device?
+ */
+ public boolean isChrDev() {
+ return type == TYPE.CHRDEV;
+ }
+
+ /**
+ * Is this a block device?
+ */
+ public boolean isBlkDev() {
+ return type == TYPE.BLKDEV;
+ }
+
+ /**
+ * Is this a fifo/pipe?
+ */
+ public boolean isFifo() {
+ return type == TYPE.FIFO;
+ }
+
+ /**
+ * Get the type of the entry.
+ */
+ public TYPE getType() {
+ return type;
+ }
+
+ /**
+ * Set the type of the entry.
+ */
+ public void setType(TYPE type) {
+ this.type = type;
+ }
+
+ /**
+ * Return the access permissions on the entry.
+ */
+ public int getMode() {
+ return mode;
+ }
+
+ /**
+ * Set the access permissions on the entry.
+ */
+ public void setMode(int mode) {
+ this.mode = mode & 07777;
+ this.permissions = PERMISSION.find(mode);
+ }
+
+ /**
+ * Returns the permissions on the entry.
+ */
+ public Set<PERMISSION> getPermissions() {
+ return permissions;
+ }
+
+ /**
+ * Returns the size of the entry.
+ */
+ public long getSize() {
+ return isDirectory() ? SIZE_UNKNOWN : size;
+ }
+
+ /**
+ * Returns the size of the entry as read from the archive.
+ */
+ long getEntrySize() {
+ return size;
+ }
+
+ /**
+ * Set the size of the entry.
+ */
+ public void setSize(long size) {
+ this.size = size;
+ }
+
+ /**
+ * Set the time the file was last modified.
+ */
+ public void setLastModifiedDate(Date mtime) {
+ this.mtime = mtime.getTime();
+ }
+
+ /**
+ * Returns the time the file was last accessed.
+ */
+ public Date getAccessTime() {
+ return new Date(atime);
+ }
+
+ /**
+ * Set the time the file was last accessed.
+ */
+ public void setAccessTime(Date atime) {
+ this.atime = atime.getTime();
+ }
+
+ /**
+ * Return the user id.
+ */
+ public int getUserId() {
+ return uid;
+ }
+
+ /**
+ * Set the user id.
+ */
+ public void setUserId(int uid) {
+ this.uid = uid;
+ }
+
+ /**
+ * Return the group id
+ */
+ public int getGroupId() {
+ return gid;
+ }
+
+ /**
+ * Set the group id.
+ */
+ public void setGroupId(int gid) {
+ this.gid = gid;
+ }
+
+ public enum TYPE {
+ WHITEOUT(14),
+ SOCKET(12),
+ LINK(10),
+ FILE(8),
+ BLKDEV(6),
+ DIRECTORY(4),
+ CHRDEV(2),
+ FIFO(1),
+ UNKNOWN(15);
+
+ private int code;
+
+ private TYPE(int code) {
+ this.code = code;
+ }
+
+ public static TYPE find(int code) {
+ TYPE type = UNKNOWN;
+
+ for (TYPE t : TYPE.values()) {
+ if (code == t.code) {
+ type = t;
+ }
+ }
+
+ return type;
+ }
+ }
+
+ public enum PERMISSION {
+ SETUID(04000),
+ SETGUI(02000),
+ STICKY(01000),
+ USER_READ(00400),
+ USER_WRITE(00200),
+ USER_EXEC(00100),
+ GROUP_READ(00040),
+ GROUP_WRITE(00020),
+ GROUP_EXEC(00010),
+ WORLD_READ(00004),
+ WORLD_WRITE(00002),
+ WORLD_EXEC(00001);
+
+ private int code;
+
+ private PERMISSION(int code) {
+ this.code = code;
+ }
+
+ public static Set<PERMISSION> find(int code) {
+ Set<PERMISSION> set = new HashSet<PERMISSION>();
+
+ for (PERMISSION p : PERMISSION.values()) {
+ if ((code & p.code) == p.code) {
+ set.add(p);
+ }
+ }
+
+ if (set.isEmpty()) {
+ return Collections.emptySet();
+ }
+
+ return EnumSet.copyOf(set);
+ }
+ }
+}
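A sketch of listing a dump archive with this entry class and the
DumpArchiveInputStream added further down in this patch; the archive path is
illustrative:

    import java.io.FileInputStream;
    import org.apache.commons.compress.archivers.dump.DumpArchiveEntry;
    import org.apache.commons.compress.archivers.dump.DumpArchiveInputStream;

    public class DumpListExample {
        public static void main(String[] args) throws Exception {
            DumpArchiveInputStream in = new DumpArchiveInputStream(
                    new FileInputStream("backup.dump")); // illustrative path
            DumpArchiveEntry entry;
            while ((entry = in.getNextDumpEntry()) != null) {
                // getPermissions() holds the PERMISSION set decoded by setMode()
                System.out.println(entry.getName()
                        + " uid=" + entry.getUserId()
                        + " " + entry.getPermissions());
            }
            in.close();
        }
    }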
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveException.java
new file mode 100644
index 000000000..8e6a9937f
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+import java.io.IOException;
+
+
+/**
+ * Dump Archive Exception
+ */
+public class DumpArchiveException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ public DumpArchiveException() {
+ }
+
+ public DumpArchiveException(String msg) {
+ super(msg);
+ }
+
+ public DumpArchiveException(Throwable cause) {
+ initCause(cause);
+ }
+
+ public DumpArchiveException(String msg, Throwable cause) {
+ super(msg);
+ initCause(cause);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveInputStream.java
new file mode 100644
index 000000000..0f804545d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveInputStream.java
@@ -0,0 +1,548 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+import org.apache.commons.compress.archivers.ArchiveException;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.Stack;
+
+/**
+ * The DumpArchiveInputStream reads a UNIX dump archive as an InputStream.
+ * Methods are provided to position at each successive entry in
+ * the archive, and the read each entry as a normal input stream
+ * using read().
+ *
+ * There doesn't seem to be a hint on the encoding of string values in
+ * any piece of documentation. Given that the main purpose of dump/restore
+ * is backing up a system, it seems very likely the format uses the
+ * current default encoding of the system.
+ *
+ * @NotThreadSafe
+ */
+public class DumpArchiveInputStream extends ArchiveInputStream {
+ private DumpArchiveSummary summary;
+ private DumpArchiveEntry active;
+ private boolean isClosed;
+ private boolean hasHitEOF;
+ private long entrySize;
+ private long entryOffset;
+ private int readIdx;
+ private final byte[] readBuf = new byte[DumpArchiveConstants.TP_SIZE];
+ private byte[] blockBuffer;
+ private int recordOffset;
+ private long filepos;
+ protected TapeInputStream raw;
+
+ // map of ino -> dirent entry. We can use this to reconstruct full paths.
+ private final Map<Integer, Dirent> names = new HashMap<Integer, Dirent>();
+
+ // map of ino -> (directory) entry when we're missing one or more elements in the path.
+ private final Map<Integer, DumpArchiveEntry> pending = new HashMap<Integer, DumpArchiveEntry>();
+
+ // queue of (directory) entries where we now have the full path.
+ private Queue<DumpArchiveEntry> queue;
+
+ /**
+ * The encoding to use for filenames and labels.
+ */
+ private final ZipEncoding encoding;
+
+ /**
+ * Constructor using the platform's default encoding for file
+ * names.
+ *
+ * @param is
+ * @throws ArchiveException
+ */
+ public DumpArchiveInputStream(InputStream is) throws ArchiveException {
+ this(is, null);
+ }
+
+ /**
+ * Constructor.
+ *
+ * @param is
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ * @since 1.6
+ */
+ public DumpArchiveInputStream(InputStream is, String encoding)
+ throws ArchiveException {
+ this.raw = new TapeInputStream(is);
+ this.hasHitEOF = false;
+ this.encoding = ZipEncodingHelper.getZipEncoding(encoding);
+
+ try {
+ // read header, verify it's a dump archive.
+ byte[] headerBytes = raw.readRecord();
+
+ if (!DumpArchiveUtil.verify(headerBytes)) {
+ throw new UnrecognizedFormatException();
+ }
+
+ // get summary information
+ summary = new DumpArchiveSummary(headerBytes, this.encoding);
+
+ // reset buffer with actual block size.
+ raw.resetBlockSize(summary.getNTRec(), summary.isCompressed());
+
+ // allocate our read buffer.
+ blockBuffer = new byte[4 * DumpArchiveConstants.TP_SIZE];
+
+ // skip past CLRI and BITS segments since we don't handle them yet.
+ readCLRI();
+ readBITS();
+ } catch (IOException ex) {
+ throw new ArchiveException(ex.getMessage(), ex);
+ }
+
+ // put in a dummy record for the root node.
+ Dirent root = new Dirent(2, 2, 4, ".");
+ names.put(Integer.valueOf(2), root);
+
+ // use a priority-based queue to ensure parent directories are
+ // released first.
+ queue = new PriorityQueue<DumpArchiveEntry>(10,
+ new Comparator<DumpArchiveEntry>() {
+ public int compare(DumpArchiveEntry p, DumpArchiveEntry q) {
+ if (p.getOriginalName() == null || q.getOriginalName() == null) {
+ return Integer.MAX_VALUE;
+ }
+
+ return p.getOriginalName().compareTo(q.getOriginalName());
+ }
+ });
+ }
+
+ @Deprecated
+ @Override
+ public int getCount() {
+ return (int) getBytesRead();
+ }
+
+ @Override
+ public long getBytesRead() {
+ return raw.getBytesRead();
+ }
+
+ /**
+ * Return the archive summary information.
+ */
+ public DumpArchiveSummary getSummary() {
+ return summary;
+ }
+
+ /**
+ * Read CLRI (deleted inode) segment.
+ */
+ private void readCLRI() throws IOException {
+ byte[] buffer = raw.readRecord();
+
+ if (!DumpArchiveUtil.verify(buffer)) {
+ throw new InvalidFormatException();
+ }
+
+ active = DumpArchiveEntry.parse(buffer);
+
+ if (DumpArchiveConstants.SEGMENT_TYPE.CLRI != active.getHeaderType()) {
+ throw new InvalidFormatException();
+ }
+
+ // we don't do anything with this yet.
+ if (raw.skip(DumpArchiveConstants.TP_SIZE * active.getHeaderCount())
+ == -1) {
+ throw new EOFException();
+ }
+ readIdx = active.getHeaderCount();
+ }
+
+ /**
+ * Read BITS segment.
+ */
+ private void readBITS() throws IOException {
+ byte[] buffer = raw.readRecord();
+
+ if (!DumpArchiveUtil.verify(buffer)) {
+ throw new InvalidFormatException();
+ }
+
+ active = DumpArchiveEntry.parse(buffer);
+
+ if (DumpArchiveConstants.SEGMENT_TYPE.BITS != active.getHeaderType()) {
+ throw new InvalidFormatException();
+ }
+
+ // we don't do anything with this yet.
+ if (raw.skip(DumpArchiveConstants.TP_SIZE * active.getHeaderCount())
+ == -1) {
+ throw new EOFException();
+ }
+ readIdx = active.getHeaderCount();
+ }
+
+ /**
+ * Read the next entry.
+ */
+ public DumpArchiveEntry getNextDumpEntry() throws IOException {
+ return getNextEntry();
+ }
+
+ /**
+ * Read the next entry.
+ */
+ @Override
+ public DumpArchiveEntry getNextEntry() throws IOException {
+ DumpArchiveEntry entry = null;
+ String path = null;
+
+ // is there anything in the queue?
+ if (!queue.isEmpty()) {
+ return queue.remove();
+ }
+
+ while (entry == null) {
+ if (hasHitEOF) {
+ return null;
+ }
+
+ // skip any remaining records in this segment for prior file.
+ // we might still have holes... easiest to do it
+ // block by block. We may want to revisit this if
+ // the unnecessary decompression time adds up.
+ while (readIdx < active.getHeaderCount()) {
+ if (!active.isSparseRecord(readIdx++)
+ && raw.skip(DumpArchiveConstants.TP_SIZE) == -1) {
+ throw new EOFException();
+ }
+ }
+
+ readIdx = 0;
+ filepos = raw.getBytesRead();
+
+ byte[] headerBytes = raw.readRecord();
+
+ if (!DumpArchiveUtil.verify(headerBytes)) {
+ throw new InvalidFormatException();
+ }
+
+ active = DumpArchiveEntry.parse(headerBytes);
+
+ // skip any remaining segments for prior file.
+ while (DumpArchiveConstants.SEGMENT_TYPE.ADDR == active.getHeaderType()) {
+ if (raw.skip(DumpArchiveConstants.TP_SIZE
+ * (active.getHeaderCount()
+ - active.getHeaderHoles())) == -1) {
+ throw new EOFException();
+ }
+
+ filepos = raw.getBytesRead();
+ headerBytes = raw.readRecord();
+
+ if (!DumpArchiveUtil.verify(headerBytes)) {
+ throw new InvalidFormatException();
+ }
+
+ active = DumpArchiveEntry.parse(headerBytes);
+ }
+
+ // check if this is an end-of-volume marker.
+ if (DumpArchiveConstants.SEGMENT_TYPE.END == active.getHeaderType()) {
+ hasHitEOF = true;
+
+ return null;
+ }
+
+ entry = active;
+
+ if (entry.isDirectory()) {
+ readDirectoryEntry(active);
+
+ // now we create an empty InputStream.
+ entryOffset = 0;
+ entrySize = 0;
+ readIdx = active.getHeaderCount();
+ } else {
+ entryOffset = 0;
+ entrySize = active.getEntrySize();
+ readIdx = 0;
+ }
+
+ recordOffset = readBuf.length;
+
+ path = getPath(entry);
+
+ if (path == null) {
+ entry = null;
+ }
+ }
+
+ entry.setName(path);
+ entry.setSimpleName(names.get(Integer.valueOf(entry.getIno())).getName());
+ entry.setOffset(filepos);
+
+ return entry;
+ }
+
+ /**
+ * Read directory entry.
+ */
+ private void readDirectoryEntry(DumpArchiveEntry entry)
+ throws IOException {
+ long size = entry.getEntrySize();
+ boolean first = true;
+
+ while (first ||
+ DumpArchiveConstants.SEGMENT_TYPE.ADDR == entry.getHeaderType()) {
+ // read the header that we just peeked at.
+ if (!first) {
+ raw.readRecord();
+ }
+
+ if (!names.containsKey(Integer.valueOf(entry.getIno())) &&
+ DumpArchiveConstants.SEGMENT_TYPE.INODE == entry.getHeaderType()) {
+ pending.put(Integer.valueOf(entry.getIno()), entry);
+ }
+
+ int datalen = DumpArchiveConstants.TP_SIZE * entry.getHeaderCount();
+
+ if (blockBuffer.length < datalen) {
+ blockBuffer = new byte[datalen];
+ }
+
+ if (raw.read(blockBuffer, 0, datalen) != datalen) {
+ throw new EOFException();
+ }
+
+ int reclen = 0;
+
+ for (int i = 0; i < datalen - 8 && i < size - 8;
+ i += reclen) {
+ int ino = DumpArchiveUtil.convert32(blockBuffer, i);
+ reclen = DumpArchiveUtil.convert16(blockBuffer, i + 4);
+
+ byte type = blockBuffer[i + 6];
+
+ String name = DumpArchiveUtil.decode(encoding, blockBuffer, i + 8, blockBuffer[i + 7]);
+
+ if (".".equals(name) || "..".equals(name)) {
+ // do nothing...
+ continue;
+ }
+
+ Dirent d = new Dirent(ino, entry.getIno(), type, name);
+
+ /*
+ if ((type == 4) && names.containsKey(ino)) {
+ System.out.println("we already have ino: " +
+ names.get(ino));
+ }
+ */
+
+ names.put(Integer.valueOf(ino), d);
+
+ // check whether this allows us to fill anything in the pending list.
+ for (Map.Entry<Integer, DumpArchiveEntry> e : pending.entrySet()) {
+ String path = getPath(e.getValue());
+
+ if (path != null) {
+ e.getValue().setName(path);
+ e.getValue()
+ .setSimpleName(names.get(e.getKey()).getName());
+ queue.add(e.getValue());
+ }
+ }
+
+ // remove anything that we found. (We can't do it earlier
+ // because of concurrent modification exceptions.)
+ for (DumpArchiveEntry e : queue) {
+ pending.remove(Integer.valueOf(e.getIno()));
+ }
+ }
+
+ byte[] peekBytes = raw.peek();
+
+ if (!DumpArchiveUtil.verify(peekBytes)) {
+ throw new InvalidFormatException();
+ }
+
+ entry = DumpArchiveEntry.parse(peekBytes);
+ first = false;
+ size -= DumpArchiveConstants.TP_SIZE;
+ }
+ }
+
+ /**
+ * Get full path for specified archive entry, or null if there's a gap.
+ *
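+ * The path is rebuilt by walking the dirent map from the entry's inode
+ * up through its parents; for example, an inode named "c" whose parents
+ * are "b" and then "a" resolves to "a/b/c".
+ *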
+ * @param entry
+ * @return full path for specified archive entry, or null if there's a gap.
+ */
+ private String getPath(DumpArchiveEntry entry) {
+ // build the stack of elements. It's possible that we're
+ // still missing an intermediate value; if so we defer this
+ // entry (see below) until the missing directory record arrives.
+ Stack<String> elements = new Stack<String>();
+ Dirent dirent = null;
+
+ for (int i = entry.getIno();; i = dirent.getParentIno()) {
+ if (!names.containsKey(Integer.valueOf(i))) {
+ elements.clear();
+ break;
+ }
+
+ dirent = names.get(Integer.valueOf(i));
+ elements.push(dirent.getName());
+
+ if (dirent.getIno() == dirent.getParentIno()) {
+ break;
+ }
+ }
+
+ // if an element is missing defer the work and read next entry.
+ if (elements.isEmpty()) {
+ pending.put(Integer.valueOf(entry.getIno()), entry);
+
+ return null;
+ }
+
+ // generate full path from stack of elements.
+ StringBuilder sb = new StringBuilder(elements.pop());
+
+ while (!elements.isEmpty()) {
+ sb.append('/');
+ sb.append(elements.pop());
+ }
+
+ return sb.toString();
+ }
+
+ /**
+ * Reads bytes from the current dump archive entry.
+ *
+ * This method is aware of the boundaries of the current
+ * entry in the archive and will deal with them as if they
+ * were this stream's start and EOF.
+ *
+ * @param buf The buffer into which to place bytes read.
+ * @param off The offset at which to place bytes read.
+ * @param len The number of bytes to read.
+ * @return The number of bytes read, or -1 at EOF.
+ * @throws IOException on error
+ */
+ @Override
+ public int read(byte[] buf, int off, int len) throws IOException {
+ int totalRead = 0;
+
+ if (hasHitEOF || isClosed || entryOffset >= entrySize) {
+ return -1;
+ }
+
+ if (len + entryOffset > entrySize) {
+ len = (int) (entrySize - entryOffset);
+ }
+
+ while (len > 0) {
+ int sz = len > readBuf.length - recordOffset
+ ? readBuf.length - recordOffset : len;
+
+ // copy any data we have
+ if (recordOffset + sz <= readBuf.length) {
+ System.arraycopy(readBuf, recordOffset, buf, off, sz);
+ totalRead += sz;
+ recordOffset += sz;
+ len -= sz;
+ off += sz;
+ }
+
+ // load next block if necessary.
+ if (len > 0) {
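+ // a single segment header describes at most 512 records, so
+ // once readIdx reaches that we must read the next header.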
+ if (readIdx >= 512) {
+ byte[] headerBytes = raw.readRecord();
+
+ if (!DumpArchiveUtil.verify(headerBytes)) {
+ throw new InvalidFormatException();
+ }
+
+ active = DumpArchiveEntry.parse(headerBytes);
+ readIdx = 0;
+ }
+
+ if (!active.isSparseRecord(readIdx++)) {
+ int r = raw.read(readBuf, 0, readBuf.length);
+ if (r != readBuf.length) {
+ throw new EOFException();
+ }
+ } else {
+ Arrays.fill(readBuf, (byte) 0);
+ }
+
+ recordOffset = 0;
+ }
+ }
+
+ entryOffset += totalRead;
+
+ return totalRead;
+ }
+
+ /**
+ * Closes the stream for this entry.
+ */
+ @Override
+ public void close() throws IOException {
+ if (!isClosed) {
+ isClosed = true;
+ raw.close();
+ }
+ }
+
+ /**
+ * Look at the first few bytes of the file to decide if it's a dump
+ * archive. With 32 bytes we can look at the magic value, with a full
+ * 1k we can verify the checksum.
+ */
+ public static boolean matches(byte[] buffer, int length) {
+ // do we have enough of the header?
+ if (length < 32) {
+ return false;
+ }
+
+ // this is the best test
+ if (length >= DumpArchiveConstants.TP_SIZE) {
+ return DumpArchiveUtil.verify(buffer);
+ }
+
+ // this will work in a pinch.
+ return DumpArchiveConstants.NFS_MAGIC == DumpArchiveUtil.convert32(buffer,
+ 24);
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveSummary.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveSummary.java
new file mode 100644
index 000000000..08b9e8f20
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveSummary.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+import java.io.IOException;
+import java.util.Date;
+
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+
+/**
+ * This class represents identifying information about a Dump archive volume.
+ * It consists of the archive's dump date, label, hostname, device name and
+ * possibly last mount point, plus the volume's volume id and first record number.
+ *
+ * For the corresponding C structure see the header of {@link DumpArchiveEntry}.
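+ *
+ * <p>A minimal usage sketch (the file name here is hypothetical; the
+ * summary is normally obtained from a {@code DumpArchiveInputStream}):</p>
+ * <pre>{@code
+ * DumpArchiveInputStream in =
+ *     new DumpArchiveInputStream(new FileInputStream("backup.dump"));
+ * DumpArchiveSummary summary = in.getSummary();
+ * System.out.println(summary.getHostname() + " dumped " + summary.getDumpDate());
+ * }</pre>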
+ */
+public class DumpArchiveSummary {
+ private long dumpDate;
+ private long previousDumpDate;
+ private int volume;
+ private String label;
+ private int level;
+ private String filesys;
+ private String devname;
+ private String hostname;
+ private int flags;
+ private int firstrec;
+ private int ntrec;
+
+ DumpArchiveSummary(byte[] buffer, ZipEncoding encoding) throws IOException {
+ dumpDate = 1000L * DumpArchiveUtil.convert32(buffer, 4);
+ previousDumpDate = 1000L * DumpArchiveUtil.convert32(buffer, 8);
+ volume = DumpArchiveUtil.convert32(buffer, 12);
+ label = DumpArchiveUtil.decode(encoding, buffer, 676, DumpArchiveConstants.LBLSIZE).trim();
+ level = DumpArchiveUtil.convert32(buffer, 692);
+ filesys = DumpArchiveUtil.decode(encoding, buffer, 696, DumpArchiveConstants.NAMELEN).trim();
+ devname = DumpArchiveUtil.decode(encoding, buffer, 760, DumpArchiveConstants.NAMELEN).trim();
+ hostname = DumpArchiveUtil.decode(encoding, buffer, 824, DumpArchiveConstants.NAMELEN).trim();
+ flags = DumpArchiveUtil.convert32(buffer, 888);
+ firstrec = DumpArchiveUtil.convert32(buffer, 892);
+ ntrec = DumpArchiveUtil.convert32(buffer, 896);
+
+ //extAttributes = DumpArchiveUtil.convert32(buffer, 900);
+ }
+
+ /**
+ * Get the date of this dump.
+ * @return the date of this dump.
+ */
+ public Date getDumpDate() {
+ return new Date(dumpDate);
+ }
+
+ /**
+ * Set dump date.
+ */
+ public void setDumpDate(Date dumpDate) {
+ this.dumpDate = dumpDate.getTime();
+ }
+
+ /**
+ * Get the date of the previous dump at this level or lower.
+ * @return the previous dump date; the epoch if there was no previous dump
+ */
+ public Date getPreviousDumpDate() {
+ return new Date(previousDumpDate);
+ }
+
+ /**
+ * Set previous dump date.
+ */
+ public void setPreviousDumpDate(Date previousDumpDate) {
+ this.previousDumpDate = previousDumpDate.getTime();
+ }
+
+ /**
+ * Get volume (tape) number.
+ * @return volume (tape) number.
+ */
+ public int getVolume() {
+ return volume;
+ }
+
+ /**
+ * Set volume (tape) number.
+ */
+ public void setVolume(int volume) {
+ this.volume = volume;
+ }
+
+ /**
+ * Get the level of this dump. This is a number between 0 and 9, inclusive,
+ * and a level 0 dump is a complete dump of the partition. For any other dump
+ * 'n' this dump contains all files that have changed since the last dump
+ * at this level or lower. This is used to support different levels of
+ * incremental backups.
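+ * For example, with a weekly level 0 dump and daily level 1 dumps,
+ * each level 1 contains everything changed since the last level 0.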
+ * @return dump level
+ */
+ public int getLevel() {
+ return level;
+ }
+
+ /**
+ * Set level.
+ */
+ public void setLevel(int level) {
+ this.level = level;
+ }
+
+ /**
+ * Get dump label. This may be autogenerated or it may be specified
+ * by the user.
+ * @return dump label
+ */
+ public String getLabel() {
+ return label;
+ }
+
+ /**
+ * Set dump label.
+ * @param label
+ */
+ public void setLabel(String label) {
+ this.label = label;
+ }
+
+ /**
+ * Get the last mountpoint, e.g., /home.
+ * @return last mountpoint
+ */
+ public String getFilesystem() {
+ return filesys;
+ }
+
+ /**
+ * Set the last mountpoint.
+ */
+ public void setFilesystem(String filesystem) {
+ this.filesys = filesystem;
+ }
+
+ /**
+ * Get the device name, e.g., /dev/sda3 or /dev/mapper/vg0-home.
+ * @return device name
+ */
+ public String getDevname() {
+ return devname;
+ }
+
+ /**
+ * Set the device name.
+ * @param devname
+ */
+ public void setDevname(String devname) {
+ this.devname = devname;
+ }
+
+ /**
+ * Get the hostname of the system where the dump was performed.
+ * @return hostname
+ */
+ public String getHostname() {
+ return hostname;
+ }
+
+ /**
+ * Set the hostname.
+ */
+ public void setHostname(String hostname) {
+ this.hostname = hostname;
+ }
+
+ /**
+ * Get the miscellaneous flags; individual bits are decoded by the is*() methods below.
+ * @return flags
+ */
+ public int getFlags() {
+ return flags;
+ }
+
+ /**
+ * Set the miscellaneous flags.
+ * @param flags
+ */
+ public void setFlags(int flags) {
+ this.flags = flags;
+ }
+
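+ // Flag bits, as decoded by the is*() accessors below:
+ // 0x0001 new header format, 0x0002 new inode format,
+ // 0x0080 compressed volume, 0x0100 metadata only,
+ // 0x8000 extended attributes present.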
+ /**
+ * Get the inode of the first record on this volume.
+ * @return inode of the first record on this volume.
+ */
+ public int getFirstRecord() {
+ return firstrec;
+ }
+
+ /**
+ * Set the inode of the first record.
+ * @param firstrec
+ */
+ public void setFirstRecord(int firstrec) {
+ this.firstrec = firstrec;
+ }
+
+ /**
+ * Get the number of records per tape block. This is typically
+ * between 10 and 32.
+ * @return the number of records per tape block
+ */
+ public int getNTRec() {
+ return ntrec;
+ }
+
+ /**
+ * Set the number of records per tape block.
+ */
+ public void setNTRec(int ntrec) {
+ this.ntrec = ntrec;
+ }
+
+ /**
+ * Is this the new header format? (We do not currently support the
+ * old format.)
+ *
+ * @return true if using new header format
+ */
+ public boolean isNewHeader() {
+ return (flags & 0x0001) == 0x0001;
+ }
+
+ /**
+ * Is this the new inode format? (We do not currently support the
+ * old format.)
+ * @return true if using new inode format
+ */
+ public boolean isNewInode() {
+ return (flags & 0x0002) == 0x0002;
+ }
+
+ /**
+ * Is this volume compressed? N.B., individual blocks may or may not be compressed.
+ * The first block is never compressed.
+ * @return true if volume is compressed
+ */
+ public boolean isCompressed() {
+ return (flags & 0x0080) == 0x0080;
+ }
+
+ /**
+ * Does this volume only contain metadata?
+ * @return true if volume only contains meta-data
+ */
+ public boolean isMetaDataOnly() {
+ return (flags & 0x0100) == 0x0100;
+ }
+
+ /**
+ * Does this volume contain extended attributes?
+ * @return true if volume contains extended attributes.
+ */
+ public boolean isExtendedAttributes() {
+ return (flags & 0x8000) == 0x8000;
+ }
+
+ /**
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ int hash = 17;
+
+ if (label != null) {
+ hash = label.hashCode();
+ }
+
+ hash += 31 * dumpDate;
+
+ if (hostname != null) {
+ hash = (31 * hostname.hashCode()) + 17;
+ }
+
+ if (devname != null) {
+ hash = (31 * devname.hashCode()) + 17;
+ }
+
+ return hash;
+ }
+
+ /**
+ * @see java.lang.Object#equals(Object)
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+
+ if (o == null || !o.getClass().equals(getClass())) {
+ return false;
+ }
+
+ DumpArchiveSummary rhs = (DumpArchiveSummary) o;
+
+ if (dumpDate != rhs.dumpDate) {
+ return false;
+ }
+
+ if ((getHostname() == null) ||
+ !getHostname().equals(rhs.getHostname())) {
+ return false;
+ }
+
+ if ((getDevname() == null) || !getDevname().equals(rhs.getDevname())) {
+ return false;
+ }
+
+ return true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveUtil.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveUtil.java
new file mode 100644
index 000000000..5b7494499
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/DumpArchiveUtil.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+import java.io.IOException;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+
+/**
+ * Various utilities for dump archives.
+ */
+class DumpArchiveUtil {
+ /**
+ * Private constructor to prevent instantiation.
+ */
+ private DumpArchiveUtil() {
+ }
+
+ /**
+ * Calculate checksum for buffer.
+ *
+ * @param buffer buffer containing tape segment header
+ * @return the computed checksum
+ */
+ public static int calculateChecksum(byte[] buffer) {
+ int calc = 0;
+
+ for (int i = 0; i < 256; i++) {
+ calc += DumpArchiveUtil.convert32(buffer, 4 * i);
+ }
+
+ return DumpArchiveConstants.CHECKSUM -
+ (calc - DumpArchiveUtil.convert32(buffer, 28));
+ }
+
+ /**
+ * Verify that the buffer contains a tape segment header.
+ *
+ * @param buffer the record to check
+ * @return true if the buffer holds a valid tape segment header
+ */
+ public static final boolean verify(byte[] buffer) {
+ // verify magic. for now only accept NFS_MAGIC.
+ int magic = convert32(buffer, 24);
+
+ if (magic != DumpArchiveConstants.NFS_MAGIC) {
+ return false;
+ }
+
+ //verify checksum...
+ int checksum = convert32(buffer, 28);
+
+ if (checksum != calculateChecksum(buffer)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Get the ino associated with this buffer.
+ *
+ * @param buffer the tape segment header
+ * @return the ino stored in the header
+ */
+ public static final int getIno(byte[] buffer) {
+ return convert32(buffer, 20);
+ }
+
+ /**
+ * Read 8-byte integer from buffer.
+ *
+ * @param buffer
+ * @param offset
+ * @return the 8-byte entry as a long
+ */
+ public static final long convert64(byte[] buffer, int offset) {
+ long i = 0;
+ i += (((long) buffer[offset + 7]) << 56);
+ i += (((long) buffer[offset + 6] << 48) & 0x00FF000000000000L);
+ i += (((long) buffer[offset + 5] << 40) & 0x0000FF0000000000L);
+ i += (((long) buffer[offset + 4] << 32) & 0x000000FF00000000L);
+ i += (((long) buffer[offset + 3] << 24) & 0x00000000FF000000L);
+ i += (((long) buffer[offset + 2] << 16) & 0x0000000000FF0000L);
+ i += (((long) buffer[offset + 1] << 8) & 0x000000000000FF00L);
+ i += (buffer[offset] & 0x00000000000000FFL);
+
+ return i;
+ }
+
+ /**
+ * Read 4-byte integer from buffer.
+ *
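+ * Values are little-endian; bytes {0x01, 0x02, 0x03, 0x04} at the
+ * offset decode to 0x04030201.
+ *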
+ * @param buffer
+ * @param offset
+ * @return the 4-byte entry as an int
+ */
+ public static final int convert32(byte[] buffer, int offset) {
+ int i = 0;
+ i = buffer[offset + 3] << 24;
+ i += (buffer[offset + 2] << 16) & 0x00FF0000;
+ i += (buffer[offset + 1] << 8) & 0x0000FF00;
+ i += buffer[offset] & 0x000000FF;
+
+ return i;
+ }
+
+ /**
+ * Read 2-byte integer from buffer.
+ *
+ * @param buffer
+ * @param offset
+ * @return the 2-byte entry as an int
+ */
+ public static final int convert16(byte[] buffer, int offset) {
+ int i = 0;
+ i += (buffer[offset + 1] << 8) & 0x0000FF00;
+ i += buffer[offset] & 0x000000FF;
+
+ return i;
+ }
+
+ /**
+ * Decodes a byte array to a string.
+ */
+ static String decode(ZipEncoding encoding, byte[] b, int offset, int len)
+ throws IOException {
+ byte[] copy = new byte[len];
+ System.arraycopy(b, offset, copy, 0, len);
+ return encoding.decode(copy);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/InvalidFormatException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/InvalidFormatException.java
new file mode 100644
index 000000000..2d8a9ac4a
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/InvalidFormatException.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+
+/**
+ * Invalid Format Exception. There was an error decoding a
+ * tape segment header.
+ */
+public class InvalidFormatException extends DumpArchiveException {
+ private static final long serialVersionUID = 1L;
+ protected long offset;
+
+ public InvalidFormatException() {
+ super("there was an error decoding a tape segment");
+ }
+
+ public InvalidFormatException(long offset) {
+ super("there was an error decoding a tape segment header at offset " +
+ offset + ".");
+ this.offset = offset;
+ }
+
+ public long getOffset() {
+ return offset;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/ShortFileException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/ShortFileException.java
new file mode 100644
index 000000000..e06c97cc0
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/ShortFileException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+
+/**
+ * Short File Exception. There was an unexpected EOF when reading
+ * the input stream.
+ */
+public class ShortFileException extends DumpArchiveException {
+ private static final long serialVersionUID = 1L;
+
+ public ShortFileException() {
+ super("unexpected EOF");
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/TapeInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/TapeInputStream.java
new file mode 100644
index 000000000..d7a9a2bd7
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/TapeInputStream.java
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import java.util.Arrays;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * Filter stream that mimics a physical tape drive capable of compressing
+ * the data stream.
+ *
+ * @NotThreadSafe
+ */
+class TapeInputStream extends FilterInputStream {
+ private byte[] blockBuffer = new byte[DumpArchiveConstants.TP_SIZE];
+ private int currBlkIdx = -1;
+ private int blockSize = DumpArchiveConstants.TP_SIZE;
+ private static final int recordSize = DumpArchiveConstants.TP_SIZE;
+ private int readOffset = DumpArchiveConstants.TP_SIZE;
+ private boolean isCompressed = false;
+ private long bytesRead = 0;
+
+ /**
+ * Constructor
+ */
+ public TapeInputStream(InputStream in) {
+ super(in);
+ }
+
+ /**
+ * Set the DumpArchive Buffer's block size. We need to sync the block size with the
+ * dump archive's actual block size since compression is handled at the
+ * block level.
+ *
+ * @param recsPerBlock
+ * records per block
+ * @param isCompressed
+ * true if the archive is compressed
+ * @throws IOException
+ * if more than one block has already been read, or there was
+ * an error reading the additional blocks.
+ */
+ public void resetBlockSize(int recsPerBlock, boolean isCompressed)
+ throws IOException {
+ this.isCompressed = isCompressed;
+
+ blockSize = recordSize * recsPerBlock;
+
+ // save first block in case we need it again
+ byte[] oldBuffer = blockBuffer;
+
+ // read rest of new block
+ blockBuffer = new byte[blockSize];
+ System.arraycopy(oldBuffer, 0, blockBuffer, 0, recordSize);
+ readFully(blockBuffer, recordSize, blockSize - recordSize);
+
+ this.currBlkIdx = 0;
+ this.readOffset = recordSize;
+ }
+
+ /**
+ * @see java.io.InputStream#available
+ */
+ @Override
+ public int available() throws IOException {
+ if (readOffset < blockSize) {
+ return blockSize - readOffset;
+ }
+
+ return in.available();
+ }
+
+ /**
+ * @see java.io.InputStream#read()
+ */
+ @Override
+ public int read() throws IOException {
+ throw new IllegalArgumentException(
+ "all reads must be multiple of record size (" + recordSize +
+ " bytes.");
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * reads the full given length unless EOF is reached.
+ *
+ * @param len length to read, must be a multiple of the stream's
+ * record size
+ */
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if ((len % recordSize) != 0) {
+ throw new IllegalArgumentException(
+ "all reads must be multiple of record size (" + recordSize +
+ " bytes.");
+ }
+
+ int bytes = 0;
+
+ while (bytes < len) {
+ // we need to read from the underlying stream.
+ // this will reset readOffset value.
+ // return -1 if there's a problem.
+ if ((readOffset == blockSize) && !readBlock(true)) {
+ return -1;
+ }
+
+ int n = 0;
+
+ if ((readOffset + (len - bytes)) <= blockSize) {
+ // we can read entirely from the buffer.
+ n = len - bytes;
+ } else {
+ // copy what we can from the buffer.
+ n = blockSize - readOffset;
+ }
+
+ // copy data, increment counters.
+ System.arraycopy(blockBuffer, readOffset, b, off, n);
+ readOffset += n;
+ bytes += n;
+ off += n;
+ }
+
+ return bytes;
+ }
+
+ /**
+ * Skip bytes. Same as read but without the arraycopy.
+ *
+ * skips the full given length unless EOF is reached.
+ *
+ * @param len length to read, must be a multiple of the stream's
+ * record size
+ */
+ @Override
+ public long skip(long len) throws IOException {
+ if ((len % recordSize) != 0) {
+ throw new IllegalArgumentException(
+ "all reads must be multiple of record size (" + recordSize +
+ " bytes.");
+ }
+
+ long bytes = 0;
+
+ while (bytes < len) {
+ // we need to read from the underlying stream.
+ // this will reset readOffset value. We do not perform
+ // any decompression if we won't eventually read the data.
+ // return -1 if there's a problem.
+ if ((readOffset == blockSize) &&
+ !readBlock((len - bytes) < blockSize)) {
+ return -1;
+ }
+
+ long n = 0;
+
+ if ((readOffset + (len - bytes)) <= blockSize) {
+ // we can read entirely from the buffer.
+ n = len - bytes;
+ } else {
+ // copy what we can from the buffer.
+ n = blockSize - readOffset;
+ }
+
+ // do not copy data but still increment counters.
+ readOffset += n;
+ bytes += n;
+ }
+
+ return bytes;
+ }
+
+ /**
+ * Close the input stream.
+ *
+ * @throws IOException on error
+ */
+ @Override
+ public void close() throws IOException {
+ if (in != null && in != System.in) {
+ in.close();
+ }
+ }
+
+ /**
+ * Peek at the next record from the input stream and return the data.
+ *
+ * @return The record data.
+ * @throws IOException on error
+ */
+ public byte[] peek() throws IOException {
+ // we need to read from the underlying stream. This
+ // isn't a problem since it would be the first step in
+ // any subsequent read() anyway.
+ if ((readOffset == blockSize) && !readBlock(true)) {
+ return null;
+ }
+
+ // copy data, increment counters.
+ byte[] b = new byte[recordSize];
+ System.arraycopy(blockBuffer, readOffset, b, 0, b.length);
+
+ return b;
+ }
+
+ /**
+ * Read a record from the input stream and return the data.
+ *
+ * @return The record data.
+ * @throws IOException on error
+ */
+ public byte[] readRecord() throws IOException {
+ byte[] result = new byte[recordSize];
+
+ if (-1 == read(result, 0, result.length)) {
+ throw new ShortFileException();
+ }
+
+ return result;
+ }
+
+ /**
+ * Read next block. All decompression is handled here.
+ *
+ * @param decompress if false the buffer will not be decompressed.
+ * This is an optimization for longer seeks.
+ * @return false if End-Of-File, else true
+ */
+ private boolean readBlock(boolean decompress) throws IOException {
+ boolean success = true;
+
+ if (in == null) {
+ throw new IOException("input buffer is closed");
+ }
+
+ if (!isCompressed || (currBlkIdx == -1)) {
+ // file is not compressed
+ success = readFully(blockBuffer, 0, blockSize);
+ bytesRead += blockSize;
+ } else {
+ if (!readFully(blockBuffer, 0, 4)) {
+ return false;
+ }
+ bytesRead += 4;
+
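+ // Block header word, little-endian (see the masks below): bit 0 is
+ // the per-block compressed flag, bits 1-3 carry compression flags,
+ // and bits 4-31 hold the length of the compressed data.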
+ int h = DumpArchiveUtil.convert32(blockBuffer, 0);
+ boolean compressed = (h & 0x01) == 0x01;
+
+ if (!compressed) {
+ // file is compressed but this block is not.
+ success = readFully(blockBuffer, 0, blockSize);
+ bytesRead += blockSize;
+ } else {
+ // this block is compressed.
+ int flags = (h >> 1) & 0x07;
+ int length = (h >> 4) & 0x0FFFFFFF;
+ byte[] compBuffer = new byte[length];
+ success = readFully(compBuffer, 0, length);
+ bytesRead += length;
+
+ if (!decompress) {
+ // just in case someone reads the data.
+ Arrays.fill(blockBuffer, (byte) 0);
+ } else {
+ switch (DumpArchiveConstants.COMPRESSION_TYPE.find(flags &
+ 0x03)) {
+ case ZLIB:
+
+ try {
+ Inflater inflator = new Inflater();
+ inflator.setInput(compBuffer, 0, compBuffer.length);
+ length = inflator.inflate(blockBuffer);
+
+ if (length != blockSize) {
+ throw new ShortFileException();
+ }
+
+ inflator.end();
+ } catch (DataFormatException e) {
+ throw new DumpArchiveException("bad data", e);
+ }
+
+ break;
+
+ case BZLIB:
+ throw new UnsupportedCompressionAlgorithmException(
+ "BZLIB2");
+
+ case LZO:
+ throw new UnsupportedCompressionAlgorithmException(
+ "LZO");
+
+ default:
+ throw new UnsupportedCompressionAlgorithmException();
+ }
+ }
+ }
+ }
+
+ currBlkIdx++;
+ readOffset = 0;
+
+ return success;
+ }
+
+ /**
+ * Fill the buffer with exactly len bytes, or fail with a ShortFileException.
+ */
+ private boolean readFully(byte[] b, int off, int len)
+ throws IOException {
+ int count = IOUtils.readFully(in, b, off, len);
+ if (count < len) {
+ throw new ShortFileException();
+ }
+
+ return true;
+ }
+
+ /**
+ * Get number of bytes read.
+ */
+ public long getBytesRead() {
+ return bytesRead;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/UnrecognizedFormatException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/UnrecognizedFormatException.java
new file mode 100644
index 000000000..333aeacd6
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/UnrecognizedFormatException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+
+/**
+ * Unrecognized Format Exception. This is either not a recognized dump archive or there's
+ * a bad tape segment header.
+ */
+public class UnrecognizedFormatException extends DumpArchiveException {
+ private static final long serialVersionUID = 1L;
+
+ public UnrecognizedFormatException() {
+ super("this is not a recognized format.");
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/UnsupportedCompressionAlgorithmException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/UnsupportedCompressionAlgorithmException.java
new file mode 100644
index 000000000..3160feb24
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/UnsupportedCompressionAlgorithmException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.dump;
+
+
+/**
+ * Unsupported compression algorithm. The dump archive uses an unsupported
+ * compression algorithm (BZLIB2 or LZO).
+ */
+public class UnsupportedCompressionAlgorithmException
+ extends DumpArchiveException {
+ private static final long serialVersionUID = 1L;
+
+ public UnsupportedCompressionAlgorithmException() {
+ super("this file uses an unsupported compression algorithm.");
+ }
+
+ public UnsupportedCompressionAlgorithmException(String alg) {
+ super("this file uses an unsupported compression algorithm: " + alg +
+ ".");
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/package.html
new file mode 100644
index 000000000..72f3c68c4
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/dump/package.html
@@ -0,0 +1,56 @@
+<html>
+<body>
+ <p>This package provides stream classes for reading archives
+ using the Unix DUMP format. This format is similar to (and
+ contemporary with) TAR but reads the raw filesystem directly.
+ This means that writers are filesystem-specific even though the
+ created archives are filesystem-agnostic.</p>
+
+ <p>Unlike other formats, DUMP offers clean support for sparse files,
+ extended attributes, and other file metadata. In addition, DUMP
+ supports incremental dumps that can capture (most) file deletions.
+ It also provides a native form of compression and will soon support
+ native encryption as well.</p>
+
+ <p>In practice TAR archives are used for both distribution
+ and backups. DUMP archives are used exclusively for backups.</p>
+
+ <p>Like any 30+-year-old application there are a number of variants.
+ For pragmatic reasons we will only support archives with the
+ 'new' tape header and inode formats. Other restrictions:</p>
+ <ul>
+ <li>We only support ZLIB compression. The format
+ also permits LZO and BZLIB compression.</li>
+ <li>Sparse files will have their holes filled.</li>
+ <li>MacOS finder and resource streams are ignored.</li>
+ <li>Extended attributes are not currently provided.</li>
+ <li>SELinux labels are not currently provided.</li>
+ </ul>
+
+ <p>As of Apache Commons Compress 1.3 support for the dump format is
+ read-only.</p>
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveEntry.java
new file mode 100644
index 000000000..d284ad988
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveEntry.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.jar;
+
+import java.security.cert.Certificate;
+import java.util.jar.Attributes;
+import java.util.jar.JarEntry;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipException;
+
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+
+/**
+ * A JAR archive entry; a thin subclass of {@link ZipArchiveEntry}.
+ *
+ * @NotThreadSafe (parent is not thread-safe)
+ */
+public class JarArchiveEntry extends ZipArchiveEntry {
+
+ // These are always null - see https://issues.apache.org/jira/browse/COMPRESS-18 for discussion
+ private final Attributes manifestAttributes = null;
+ private final Certificate[] certificates = null;
+
+ public JarArchiveEntry(ZipEntry entry) throws ZipException {
+ super(entry);
+ }
+
+ public JarArchiveEntry(String name) {
+ super(name);
+ }
+
+ public JarArchiveEntry(ZipArchiveEntry entry) throws ZipException {
+ super(entry);
+ }
+
+ public JarArchiveEntry(JarEntry entry) throws ZipException {
+ super(entry);
+ }
+
+ /**
+ * This method is not implemented and won't ever be.
+ * The JVM equivalent has a different name {@link java.util.jar.JarEntry#getAttributes()}
+ *
+ * @deprecated since 1.5, do not use; always returns null
+ * @return Always returns null.
+ */
+ @Deprecated
+ public Attributes getManifestAttributes() {
+ return manifestAttributes;
+ }
+
+ /**
+ * Return a copy of the list of certificates or null if there are none.
+ *
+ * @return Always returns null in the current implementation
+ *
+ * @deprecated since 1.5, not currently implemented
+ */
+ @Deprecated
+ public Certificate[] getCertificates() {
+ if (certificates != null) { // never true currently
+ Certificate[] certs = new Certificate[certificates.length];
+ System.arraycopy(certificates, 0, certs, 0, certs.length);
+ return certs;
+ }
+ /*
+ * Note, the method
+ * Certificate[] java.util.jar.JarEntry.getCertificates()
+ * also returns null or the list of certificates (but not copied)
+ */
+ return null;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveInputStream.java
new file mode 100644
index 000000000..d051a4b6d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveInputStream.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.jar;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
+
+/**
+ * Implements an input stream that can read entries from jar files.
+ *
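+ * <p>A typical read loop; the file name is hypothetical:</p>
+ * <pre>{@code
+ * JarArchiveInputStream in =
+ *     new JarArchiveInputStream(new FileInputStream("app.jar"));
+ * JarArchiveEntry entry;
+ * while ((entry = in.getNextJarEntry()) != null) {
+ *     System.out.println(entry.getName());
+ * }
+ * in.close();
+ * }</pre>
+ *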
+ * @NotThreadSafe
+ */
+public class JarArchiveInputStream extends ZipArchiveInputStream {
+
+ public JarArchiveInputStream(final InputStream inputStream) {
+ super(inputStream);
+ }
+
+ public JarArchiveEntry getNextJarEntry() throws IOException {
+ ZipArchiveEntry entry = getNextZipEntry();
+ return entry == null ? null : new JarArchiveEntry(entry);
+ }
+
+ @Override
+ public ArchiveEntry getNextEntry() throws IOException {
+ return getNextJarEntry();
+ }
+
+ /**
+ * Checks if the signature matches what is expected for a jar file
+ * (in this case it is the same as for a zip file).
+ *
+ * @param signature
+ * the bytes to check
+ * @param length
+ * the number of bytes to check
+ * @return true, if this stream is a jar archive stream, false otherwise
+ */
+ public static boolean matches(byte[] signature, int length) {
+ return ZipArchiveInputStream.matches(signature, length);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveOutputStream.java
new file mode 100644
index 000000000..f372ad760
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/JarArchiveOutputStream.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.jar;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.zip.JarMarker;
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
+
+/**
+ * Subclass that adds a special extra field to the very first entry
+ * which allows the created archive to be used as an executable jar on
+ * Solaris.
+ *
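+ * <p>A minimal usage sketch; the file name is hypothetical:</p>
+ * <pre>{@code
+ * JarArchiveOutputStream out =
+ *     new JarArchiveOutputStream(new FileOutputStream("app.jar"));
+ * out.putArchiveEntry(new JarArchiveEntry("hello.txt"));
+ * out.write("hello".getBytes());
+ * out.closeArchiveEntry();
+ * out.close();
+ * }</pre>
+ *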
+ * @NotThreadSafe
+ */
+public class JarArchiveOutputStream extends ZipArchiveOutputStream {
+
+ private boolean jarMarkerAdded = false;
+
+ public JarArchiveOutputStream(final OutputStream out) {
+ super(out);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
+ */
+ @Override
+ public void putArchiveEntry(ArchiveEntry ze) throws IOException {
+ if (!jarMarkerAdded) {
+ ((ZipArchiveEntry)ze).addAsFirstExtraField(JarMarker.getInstance());
+ jarMarkerAdded = true;
+ }
+ super.putArchiveEntry(ze);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/package.html
new file mode 100644
index 000000000..09829ae6a
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/jar/package.html
@@ -0,0 +1,25 @@
+<html>
+<body>
+ <p>Provides stream classes for reading and writing archives using
+ the ZIP format with some extensions for the special case of JAR
+ archives.</p>
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/package.html
new file mode 100644
index 000000000..df1922b4a
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/package.html
@@ -0,0 +1,24 @@
+<html>
+<body>
+ <p>Provides a unified API and factories for dealing with archives
+ in different formats.</p>
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java
new file mode 100644
index 000000000..cc5db1ca6
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java
@@ -0,0 +1,1073 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.tar;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Date;
+import java.util.Locale;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.utils.ArchiveUtils;
+
+/**
+ * This class represents an entry in a Tar archive. It consists
+ * of the entry's header, as well as the entry's File. Entries
+ * can be instantiated in one of three ways, depending on how
+ * they are to be used.
+ *
+ * TarEntries that are created from the header bytes read from
+ * an archive are instantiated with the TarEntry( byte[] )
+ * constructor. These entries will be used when extracting from
+ * or listing the contents of an archive. These entries have their
+ * header filled in using the header bytes. They also set the File
+ * to null, since they reference an archive entry not a file.
+ *
+ * TarEntries that are created from Files that are to be written
+ * into an archive are instantiated with the TarEntry( File )
+ * constructor. These entries have their header filled in using
+ * the File's information. They also keep a reference to the File
+ * for convenience when writing entries.
+ *
+ * Finally, TarEntries can be constructed from nothing but a name.
+ * This allows the programmer to construct the entry by hand, for
+ * instance when only an InputStream is available for writing to
+ * the archive, and the header information is constructed from
+ * other information. In this case the header fields are set to
+ * defaults and the File is set to null.
+ *
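+ * <p>A sketch of the three instantiation styles described above
+ * (headerBuf stands for a header record read from an archive):</p>
+ * <pre>{@code
+ * TarArchiveEntry fromHeader = new TarArchiveEntry(headerBuf);          // from an archive
+ * TarArchiveEntry fromFile = new TarArchiveEntry(new File("a.txt"));    // to be written
+ * TarArchiveEntry byName = new TarArchiveEntry("dir/a.txt");            // built by hand
+ * }</pre>
+ *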
+ *
+ * The C structure for a Tar Entry's header is:
+ *
+ * struct header {
+ * char name[100]; // TarConstants.NAMELEN - offset 0
+ * char mode[8]; // TarConstants.MODELEN - offset 100
+ * char uid[8]; // TarConstants.UIDLEN - offset 108
+ * char gid[8]; // TarConstants.GIDLEN - offset 116
+ * char size[12]; // TarConstants.SIZELEN - offset 124
+ * char mtime[12]; // TarConstants.MODTIMELEN - offset 136
+ * char chksum[8]; // TarConstants.CHKSUMLEN - offset 148
+ * char linkflag[1]; // - offset 156
+ * char linkname[100]; // TarConstants.NAMELEN - offset 157
+ * The following fields are only present in new-style POSIX tar archives:
+ * char magic[6]; // TarConstants.MAGICLEN - offset 257
+ * char version[2]; // TarConstants.VERSIONLEN - offset 263
+ * char uname[32]; // TarConstants.UNAMELEN - offset 265
+ * char gname[32]; // TarConstants.GNAMELEN - offset 297
+ * char devmajor[8]; // TarConstants.DEVLEN - offset 329
+ * char devminor[8]; // TarConstants.DEVLEN - offset 337
+ * char prefix[155]; // TarConstants.PREFIXLEN - offset 345
+ * // Used if "name" field is not long enough to hold the path
+ * char pad[12]; // NULs - offset 500
+ * } header;
+ * All unused bytes are set to null.
+ * New-style GNU tar files are slightly different from the above.
+ * For values of size larger than 077777777777L (11 7s)
+ * or uid and gid larger than 07777777L (7 7s)
+ * the sign bit of the first byte is set, and the rest of the
+ * field is the binary representation of the number.
+ * See TarUtils.parseOctalOrBinary.
+ *
+ *
+ *
+ * The C structure for a old GNU Tar Entry's header is:
+ *
+ * struct oldgnu_header {
+ * char unused_pad1[345]; // TarConstants.PAD1LEN_GNU - offset 0
+ * char atime[12]; // TarConstants.ATIMELEN_GNU - offset 345
+ * char ctime[12]; // TarConstants.CTIMELEN_GNU - offset 357
+ * char offset[12]; // TarConstants.OFFSETLEN_GNU - offset 369
+ * char longnames[4]; // TarConstants.LONGNAMESLEN_GNU - offset 381
+ * char unused_pad2; // TarConstants.PAD2LEN_GNU - offset 385
+ * struct sparse sp[4]; // TarConstants.SPARSELEN_GNU - offset 386
+ * char isextended; // TarConstants.ISEXTENDEDLEN_GNU - offset 482
+ * char realsize[12]; // TarConstants.REALSIZELEN_GNU - offset 483
+ * char unused_pad[17]; // TarConstants.PAD3LEN_GNU - offset 495
+ * };
+ *
+ * Whereas, "struct sparse" is:
+ *
+ * struct sparse {
+ * char offset[12]; // offset 0
+ * char numbytes[12]; // offset 12
+ * };
+ *
+ *
+ * @NotThreadSafe
+ */
+
+public class TarArchiveEntry implements TarConstants, ArchiveEntry {
+ /** The entry's name. */
+ private String name = "";
+
+ /** The entry's permission mode. */
+ private int mode;
+
+ /** The entry's user id. */
+ private int userId = 0;
+
+ /** The entry's group id. */
+ private int groupId = 0;
+
+ /** The entry's size. */
+ private long size = 0;
+
+ /** The entry's modification time. */
+ private long modTime;
+
+ /** If the header checksum is reasonably correct. */
+ private boolean checkSumOK;
+
+ /** The entry's link flag. */
+ private byte linkFlag;
+
+ /** The entry's link name. */
+ private String linkName = "";
+
+ /** The entry's magic tag. */
+ private String magic = MAGIC_POSIX;
+ /** The version of the format */
+ private String version = VERSION_POSIX;
+
+ /** The entry's user name. */
+ private String userName;
+
+ /** The entry's group name. */
+ private String groupName = "";
+
+ /** The entry's major device number. */
+ private int devMajor = 0;
+
+ /** The entry's minor device number. */
+ private int devMinor = 0;
+
+ /** If an extension sparse header follows. */
+ private boolean isExtended;
+
+ /** The entry's real size in case of a sparse file. */
+ private long realSize;
+
+ /** The entry's file reference */
+ private final File file;
+
+ /** Maximum length of a user's name in the tar file */
+ public static final int MAX_NAMELEN = 31;
+
+ /** Default permissions bits for directories */
+ public static final int DEFAULT_DIR_MODE = 040755;
+
+ /** Default permissions bits for files */
+ public static final int DEFAULT_FILE_MODE = 0100644;
+
+ /** Convert millis to seconds */
+ public static final int MILLIS_PER_SECOND = 1000;
+
+ /**
+ * Construct an empty entry and prepare the header values.
+ */
+ private TarArchiveEntry() {
+ String user = System.getProperty("user.name", "");
+
+ if (user.length() > MAX_NAMELEN) {
+ user = user.substring(0, MAX_NAMELEN);
+ }
+
+ this.userName = user;
+ this.file = null;
+ }
+
+ /**
+ * Construct an entry with only a name. This allows the programmer
+ * to construct the entry's header "by hand". File is set to null.
+ *
+ * @param name the entry name
+ */
+ public TarArchiveEntry(String name) {
+ this(name, false);
+ }
+
+ /**
+ * Construct an entry with only a name. This allows the programmer
+ * to construct the entry's header "by hand". File is set to null.
+ *
+ * @param name the entry name
+ * @param preserveLeadingSlashes whether to allow leading slashes
+ * in the name.
+ *
+ * @since 1.1
+ */
+ public TarArchiveEntry(String name, boolean preserveLeadingSlashes) {
+ this();
+
+ name = normalizeFileName(name, preserveLeadingSlashes);
+ boolean isDir = name.endsWith("/");
+
+ this.name = name;
+ this.mode = isDir ? DEFAULT_DIR_MODE : DEFAULT_FILE_MODE;
+ this.linkFlag = isDir ? LF_DIR : LF_NORMAL;
+ this.modTime = new Date().getTime() / MILLIS_PER_SECOND;
+ this.userName = "";
+ }
+
+ /**
+ * Construct an entry with a name and a link flag.
+ *
+ * @param name the entry name
+ * @param linkFlag the entry link flag.
+ */
+ public TarArchiveEntry(String name, byte linkFlag) {
+ this(name, linkFlag, false);
+ }
+
+ /**
+ * Construct an entry with a name and a link flag.
+ *
+ * @param name the entry name
+ * @param linkFlag the entry link flag.
+ * @param preserveLeadingSlashes whether to allow leading slashes
+ * in the name.
+ *
+ * @since 1.5
+ */
+ public TarArchiveEntry(String name, byte linkFlag, boolean preserveLeadingSlashes) {
+ this(name, preserveLeadingSlashes);
+ this.linkFlag = linkFlag;
+ if (linkFlag == LF_GNUTYPE_LONGNAME) {
+ magic = MAGIC_GNU;
+ version = VERSION_GNU_SPACE;
+ }
+ }
+
+ /**
+ * Construct an entry for a file. File is set to file, and the
+ * header is constructed from information from the file.
+ * The name is set from the normalized file path.
+ *
+ * @param file The file that the entry represents.
+ */
+ public TarArchiveEntry(File file) {
+ this(file, normalizeFileName(file.getPath(), false));
+ }
+
+ /**
+ * Construct an entry for a file. File is set to file, and the
+ * header is constructed from information from the file.
+ *
+ * @param file The file that the entry represents.
+ * @param fileName the name to be used for the entry.
+ */
+ public TarArchiveEntry(File file, String fileName) {
+ this.file = file;
+
+ if (file.isDirectory()) {
+ this.mode = DEFAULT_DIR_MODE;
+ this.linkFlag = LF_DIR;
+
+ int nameLength = fileName.length();
+ if (nameLength == 0 || fileName.charAt(nameLength - 1) != '/') {
+ this.name = fileName + "/";
+ } else {
+ this.name = fileName;
+ }
+ } else {
+ this.mode = DEFAULT_FILE_MODE;
+ this.linkFlag = LF_NORMAL;
+ this.size = file.length();
+ this.name = fileName;
+ }
+
+ this.modTime = file.lastModified() / MILLIS_PER_SECOND;
+ this.userName = "";
+ }
+
+ /**
+ * Construct an entry from an archive's header bytes. File is set
+ * to null.
+ *
+ * @param headerBuf The header bytes from a tar archive entry.
+ * @throws IllegalArgumentException if any of the numeric fields have an invalid format
+ */
+ public TarArchiveEntry(byte[] headerBuf) {
+ this();
+ parseTarHeader(headerBuf);
+ }
+
+ /**
+ * Construct an entry from an archive's header bytes. File is set
+ * to null.
+ *
+ * @param headerBuf The header bytes from a tar archive entry.
+ * @param encoding encoding to use for file names
+ * @since 1.4
+ * @throws IllegalArgumentException if any of the numeric fields have an invalid format
+ */
+ public TarArchiveEntry(byte[] headerBuf, ZipEncoding encoding)
+ throws IOException {
+ this();
+ parseTarHeader(headerBuf, encoding);
+ }
+
+ /**
+ * Determine if the two entries are equal. Equality is determined
+ * by the header names being equal.
+ *
+ * @param it Entry to be checked for equality.
+ * @return True if the entries are equal.
+ */
+ public boolean equals(TarArchiveEntry it) {
+ return getName().equals(it.getName());
+ }
+
+ /**
+ * Determine if the two entries are equal. Equality is determined
+ * by the header names being equal.
+ *
+ * @param it Entry to be checked for equality.
+ * @return True if the entries are equal.
+ */
+ @Override
+ public boolean equals(Object it) {
+ if (it == null || getClass() != it.getClass()) {
+ return false;
+ }
+ return equals((TarArchiveEntry) it);
+ }
+
+ /**
+ * Hashcodes are based on entry names.
+ *
+ * @return the entry hashcode
+ */
+ @Override
+ public int hashCode() {
+ return getName().hashCode();
+ }
+
+ /**
+ * Determine if the given entry is a descendant of this entry.
+ * Descendancy is determined by the name of the descendant
+ * starting with this entry's name.
+ *
+ * @param desc Entry to be checked as a descendant of this.
+ * @return True if entry is a descendant of this.
+ */
+ public boolean isDescendent(TarArchiveEntry desc) {
+ return desc.getName().startsWith(getName());
+ }
+
+ /**
+ * Get this entry's name.
+ *
+ * @return This entry's name.
+ */
+ public String getName() {
+ return name.toString();
+ }
+
+ /**
+ * Set this entry's name.
+ *
+ * @param name This entry's new name.
+ */
+ public void setName(String name) {
+ this.name = normalizeFileName(name, false);
+ }
+
+ /**
+ * Set the mode for this entry
+ *
+ * @param mode the mode for this entry
+ */
+ public void setMode(int mode) {
+ this.mode = mode;
+ }
+
+ /**
+ * Get this entry's link name.
+ *
+ * @return This entry's link name.
+ */
+ public String getLinkName() {
+ return linkName.toString();
+ }
+
+ /**
+ * Set this entry's link name.
+ *
+ * @param link the link name to use.
+ *
+ * @since 1.1
+ */
+ public void setLinkName(String link) {
+ this.linkName = link;
+ }
+
+ /**
+ * Get this entry's user id.
+ *
+ * @return This entry's user id.
+ */
+ public int getUserId() {
+ return userId;
+ }
+
+ /**
+ * Set this entry's user id.
+ *
+ * @param userId This entry's new user id.
+ */
+ public void setUserId(int userId) {
+ this.userId = userId;
+ }
+
+ /**
+ * Get this entry's group id.
+ *
+ * @return This entry's group id.
+ */
+ public int getGroupId() {
+ return groupId;
+ }
+
+ /**
+ * Set this entry's group id.
+ *
+ * @param groupId This entry's new group id.
+ */
+ public void setGroupId(int groupId) {
+ this.groupId = groupId;
+ }
+
+ /**
+ * Get this entry's user name.
+ *
+ * @return This entry's user name.
+ */
+ public String getUserName() {
+ return userName.toString();
+ }
+
+ /**
+ * Set this entry's user name.
+ *
+ * @param userName This entry's new user name.
+ */
+ public void setUserName(String userName) {
+ this.userName = userName;
+ }
+
+ /**
+ * Get this entry's group name.
+ *
+ * @return This entry's group name.
+ */
+ public String getGroupName() {
+ return groupName.toString();
+ }
+
+ /**
+ * Set this entry's group name.
+ *
+ * @param groupName This entry's new group name.
+ */
+ public void setGroupName(String groupName) {
+ this.groupName = groupName;
+ }
+
+ /**
+ * Convenience method to set this entry's group and user ids.
+ *
+ * @param userId This entry's new user id.
+ * @param groupId This entry's new group id.
+ */
+ public void setIds(int userId, int groupId) {
+ setUserId(userId);
+ setGroupId(groupId);
+ }
+
+ /**
+ * Convenience method to set this entry's group and user names.
+ *
+ * @param userName This entry's new user name.
+ * @param groupName This entry's new group name.
+ */
+ public void setNames(String userName, String groupName) {
+ setUserName(userName);
+ setGroupName(groupName);
+ }
+
+ /**
+ * Set this entry's modification time. The parameter passed
+ * to this method is in "Java time", i.e. milliseconds since the epoch.
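+ * For example, {@code entry.setModTime(System.currentTimeMillis())}
+ * stamps the entry with the current time.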
+ *
+ * @param time This entry's new modification time.
+ */
+ public void setModTime(long time) {
+ modTime = time / MILLIS_PER_SECOND;
+ }
+
+ /**
+ * Set this entry's modification time.
+ *
+ * @param time This entry's new modification time.
+ */
+ public void setModTime(Date time) {
+ modTime = time.getTime() / MILLIS_PER_SECOND;
+ }
+
+ /**
+ * Get this entry's modification time.
+ *
+ * @return This entry's modification time.
+ */
+ public Date getModTime() {
+ return new Date(modTime * MILLIS_PER_SECOND);
+ }
+
+ public Date getLastModifiedDate() {
+ return getModTime();
+ }
+
+ /**
+ * Get this entry's checksum status.
+ *
+ * @return whether the header checksum is reasonably correct
+ * @see TarUtils#verifyCheckSum(byte[])
+ * @since 1.5
+ */
+ public boolean isCheckSumOK() {
+ return checkSumOK;
+ }
+
+ /**
+ * Get this entry's file.
+ *
+ * @return This entry's file.
+ */
+ public File getFile() {
+ return file;
+ }
+
+ /**
+ * Get this entry's mode.
+ *
+ * @return This entry's mode.
+ */
+ public int getMode() {
+ return mode;
+ }
+
+ /**
+ * Get this entry's file size.
+ *
+ * @return This entry's file size.
+ */
+ public long getSize() {
+ return size;
+ }
+
+ /**
+ * Set this entry's file size.
+ *
+ * @param size This entry's new file size.
+ * @throws IllegalArgumentException if the size is < 0.
+ */
+ public void setSize(long size) {
+ if (size < 0){
+ throw new IllegalArgumentException("Size is out of range: "+size);
+ }
+ this.size = size;
+ }
+
+ /**
+ * Get this entry's major device number.
+ *
+ * @return This entry's major device number.
+ * @since 1.4
+ */
+ public int getDevMajor() {
+ return devMajor;
+ }
+
+ /**
+ * Set this entry's major device number.
+ *
+ * @param devNo This entry's major device number.
+ * @throws IllegalArgumentException if the devNo is < 0.
+ * @since 1.4
+ */
+ public void setDevMajor(int devNo) {
+ if (devNo < 0){
+ throw new IllegalArgumentException("Major device number is out of "
+ + "range: " + devNo);
+ }
+ this.devMajor = devNo;
+ }
+
+ /**
+ * Get this entry's minor device number.
+ *
+ * @return This entry's minor device number.
+ * @since 1.4
+ */
+ public int getDevMinor() {
+ return devMinor;
+ }
+
+ /**
+ * Set this entry's minor device number.
+ *
+ * @param devNo This entry's minor device number.
+ * @throws IllegalArgumentException if the devNo is < 0.
+ * @since 1.4
+ */
+ public void setDevMinor(int devNo) {
+ if (devNo < 0){
+ throw new IllegalArgumentException("Minor device number is out of "
+ + "range: " + devNo);
+ }
+ this.devMinor = devNo;
+ }
+
+ /**
+ * For a sparse file, indicates whether an extension sparse header
+ * follows.
+ *
+ * @return true if an extension sparse header follows.
+ */
+ public boolean isExtended() {
+ return isExtended;
+ }
+
+ /**
+ * Get this entry's real file size in case of a sparse file.
+ *
+ * @return This entry's real file size.
+ */
+ public long getRealSize() {
+ return realSize;
+ }
+
+ /**
+ * Indicate if this entry is a GNU sparse block
+ *
+ * @return true if this is a sparse extension provided by GNU tar
+ */
+ public boolean isGNUSparse() {
+ return linkFlag == LF_GNUTYPE_SPARSE;
+ }
+
+ /**
+ * Indicate if this entry is a GNU long linkname block
+ *
+ * @return true if this is a long name extension provided by GNU tar
+ */
+ public boolean isGNULongLinkEntry() {
+ return linkFlag == LF_GNUTYPE_LONGLINK
+ && name.equals(GNU_LONGLINK);
+ }
+
+ /**
+ * Indicate if this entry is a GNU long name block
+ *
+ * @return true if this is a long name extension provided by GNU tar
+ */
+ public boolean isGNULongNameEntry() {
+ return linkFlag == LF_GNUTYPE_LONGNAME
+ && name.equals(GNU_LONGLINK);
+ }
+
+ /**
+ * Check if this is a Pax header.
+ *
+ * @return {@code true} if this is a Pax header.
+ *
+ * @since 1.1
+ *
+ */
+ public boolean isPaxHeader(){
+ return linkFlag == LF_PAX_EXTENDED_HEADER_LC
+ || linkFlag == LF_PAX_EXTENDED_HEADER_UC;
+ }
+
+ /**
+ * Check if this is a Pax header.
+ *
+ * @return {@code true} if this is a Pax header.
+ *
+ * @since 1.1
+ */
+ public boolean isGlobalPaxHeader(){
+ return linkFlag == LF_PAX_GLOBAL_EXTENDED_HEADER;
+ }
+
+ /**
+ * Return whether or not this entry represents a directory.
+ *
+ * @return True if this entry is a directory.
+ */
+ public boolean isDirectory() {
+ if (file != null) {
+ return file.isDirectory();
+ }
+
+ if (linkFlag == LF_DIR) {
+ return true;
+ }
+
+ if (getName().endsWith("/")) {
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Check if this is a "normal file"
+ *
+ * @since 1.2
+ */
+ public boolean isFile() {
+ if (file != null) {
+ return file.isFile();
+ }
+ if (linkFlag == LF_OLDNORM || linkFlag == LF_NORMAL) {
+ return true;
+ }
+ return !getName().endsWith("/");
+ }
+
+ /**
+ * Check if this is a symbolic link entry.
+ *
+ * @since 1.2
+ */
+ public boolean isSymbolicLink() {
+ return linkFlag == LF_SYMLINK;
+ }
+
+ /**
+ * Check if this is a link entry.
+ *
+ * @since 1.2
+ */
+ public boolean isLink() {
+ return linkFlag == LF_LINK;
+ }
+
+ /**
+ * Check if this is a character device entry.
+ *
+ * @since 1.2
+ */
+ public boolean isCharacterDevice() {
+ return linkFlag == LF_CHR;
+ }
+
+ /**
+ * Check if this is a block device entry.
+ *
+ * @since 1.2
+ */
+ public boolean isBlockDevice() {
+ return linkFlag == LF_BLK;
+ }
+
+ /**
+ * Check if this is a FIFO (pipe) entry.
+ *
+ * @since 1.2
+ */
+ public boolean isFIFO() {
+ return linkFlag == LF_FIFO;
+ }
+
+ /**
+ * If this entry represents a file, and the file is a directory, return
+ * an array of TarEntries for this entry's children.
+ *
+ * @return An array of TarArchiveEntry instances for this entry's children.
+ */
+ public TarArchiveEntry[] getDirectoryEntries() {
+ if (file == null || !file.isDirectory()) {
+ return new TarArchiveEntry[0];
+ }
+
+ String[] list = file.list();
+ TarArchiveEntry[] result = new TarArchiveEntry[list.length];
+
+ for (int i = 0; i < list.length; ++i) {
+ result[i] = new TarArchiveEntry(new File(file, list[i]));
+ }
+
+ return result;
+ }
+
+ /**
+ * Write an entry's header information to a header buffer.
+ *
+ * This method does not use the star/GNU tar/BSD tar extensions.
+ *
+ * @param outbuf The tar entry header buffer to fill in.
+ */
+ public void writeEntryHeader(byte[] outbuf) {
+ try {
+ writeEntryHeader(outbuf, TarUtils.DEFAULT_ENCODING, false);
+ } catch (IOException ex) {
+ try {
+ writeEntryHeader(outbuf, TarUtils.FALLBACK_ENCODING, false);
+ } catch (IOException ex2) {
+ // impossible
+ throw new RuntimeException(ex2);
+ }
+ }
+ }
+
+ /**
+ * Write an entry's header information to a header buffer.
+ *
+ * @param outbuf The tar entry header buffer to fill in.
+ * @param encoding encoding to use when writing the file name.
+ * @param starMode whether to use the star/GNU tar/BSD tar
+ * extension for numeric fields if their value doesn't fit in the
+ * maximum size of standard tar archives
+ * @since 1.4
+ */
+ public void writeEntryHeader(byte[] outbuf, ZipEncoding encoding,
+ boolean starMode) throws IOException {
+ int offset = 0;
+
+ offset = TarUtils.formatNameBytes(name, outbuf, offset, NAMELEN,
+ encoding);
+ offset = writeEntryHeaderField(mode, outbuf, offset, MODELEN, starMode);
+ offset = writeEntryHeaderField(userId, outbuf, offset, UIDLEN,
+ starMode);
+ offset = writeEntryHeaderField(groupId, outbuf, offset, GIDLEN,
+ starMode);
+ offset = writeEntryHeaderField(size, outbuf, offset, SIZELEN, starMode);
+ offset = writeEntryHeaderField(modTime, outbuf, offset, MODTIMELEN,
+ starMode);
+
+ int csOffset = offset;
+
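+ // Per the tar format, the checksum is computed over the header with
+ // the checksum field itself filled with spaces; the real value is
+ // written into place at the end of this method.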
+ for (int c = 0; c < CHKSUMLEN; ++c) {
+ outbuf[offset++] = (byte) ' ';
+ }
+
+ outbuf[offset++] = linkFlag;
+ offset = TarUtils.formatNameBytes(linkName, outbuf, offset, NAMELEN,
+ encoding);
+ offset = TarUtils.formatNameBytes(magic, outbuf, offset, MAGICLEN);
+ offset = TarUtils.formatNameBytes(version, outbuf, offset, VERSIONLEN);
+ offset = TarUtils.formatNameBytes(userName, outbuf, offset, UNAMELEN,
+ encoding);
+ offset = TarUtils.formatNameBytes(groupName, outbuf, offset, GNAMELEN,
+ encoding);
+ offset = writeEntryHeaderField(devMajor, outbuf, offset, DEVLEN,
+ starMode);
+ offset = writeEntryHeaderField(devMinor, outbuf, offset, DEVLEN,
+ starMode);
+
+ while (offset < outbuf.length) {
+ outbuf[offset++] = 0;
+ }
+
+ long chk = TarUtils.computeCheckSum(outbuf);
+
+ TarUtils.formatCheckSumOctalBytes(chk, outbuf, csOffset, CHKSUMLEN);
+ }
+
+ private int writeEntryHeaderField(long value, byte[] outbuf, int offset,
+ int length, boolean starMode) {
+ if (!starMode && (value < 0
+ || value >= 1L << 3 * (length - 1))) {
+ // value doesn't fit into field when written as octal
+ // number, will be written to PAX header or causes an
+ // error
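+ // (e.g. a 12-byte octal field holds values up to
+ // 8^11 - 1 = 8589934591, i.e. just under 8 GiB)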
+ return TarUtils.formatLongOctalBytes(0, outbuf, offset, length);
+ }
+ return TarUtils.formatLongOctalOrBinaryBytes(value, outbuf, offset,
+ length);
+ }
+
+ /**
+ * Parse an entry's header information from a header buffer.
+ *
+ * @param header The tar entry header buffer to get information from.
+ * @throws IllegalArgumentException if any of the numeric fields have an invalid format
+ */
+ public void parseTarHeader(byte[] header) {
+ try {
+ parseTarHeader(header, TarUtils.DEFAULT_ENCODING);
+ } catch (IOException ex) {
+ try {
+ parseTarHeader(header, TarUtils.DEFAULT_ENCODING, true);
+ } catch (IOException ex2) {
+ // not really possible
+ throw new RuntimeException(ex2);
+ }
+ }
+ }
+
+ /**
+ * Parse an entry's header information from a header buffer.
+ *
+ * @param header The tar entry header buffer to get information from.
+ * @param encoding encoding to use for file names
+ * @since 1.4
+ * @throws IllegalArgumentException if any of the numeric fields
+ * have an invalid format
+ */
+ public void parseTarHeader(byte[] header, ZipEncoding encoding)
+ throws IOException {
+ parseTarHeader(header, encoding, false);
+ }
+
+ private void parseTarHeader(byte[] header, ZipEncoding encoding,
+ final boolean oldStyle)
+ throws IOException {
+ int offset = 0;
+
+ name = oldStyle ? TarUtils.parseName(header, offset, NAMELEN)
+ : TarUtils.parseName(header, offset, NAMELEN, encoding);
+ offset += NAMELEN;
+ mode = (int) TarUtils.parseOctalOrBinary(header, offset, MODELEN);
+ offset += MODELEN;
+ userId = (int) TarUtils.parseOctalOrBinary(header, offset, UIDLEN);
+ offset += UIDLEN;
+ groupId = (int) TarUtils.parseOctalOrBinary(header, offset, GIDLEN);
+ offset += GIDLEN;
+ size = TarUtils.parseOctalOrBinary(header, offset, SIZELEN);
+ offset += SIZELEN;
+ modTime = TarUtils.parseOctalOrBinary(header, offset, MODTIMELEN);
+ offset += MODTIMELEN;
+ checkSumOK = TarUtils.verifyCheckSum(header);
+ offset += CHKSUMLEN;
+ linkFlag = header[offset++];
+ linkName = oldStyle ? TarUtils.parseName(header, offset, NAMELEN)
+ : TarUtils.parseName(header, offset, NAMELEN, encoding);
+ offset += NAMELEN;
+ magic = TarUtils.parseName(header, offset, MAGICLEN);
+ offset += MAGICLEN;
+ version = TarUtils.parseName(header, offset, VERSIONLEN);
+ offset += VERSIONLEN;
+ userName = oldStyle ? TarUtils.parseName(header, offset, UNAMELEN)
+ : TarUtils.parseName(header, offset, UNAMELEN, encoding);
+ offset += UNAMELEN;
+ groupName = oldStyle ? TarUtils.parseName(header, offset, GNAMELEN)
+ : TarUtils.parseName(header, offset, GNAMELEN, encoding);
+ offset += GNAMELEN;
+ devMajor = (int) TarUtils.parseOctalOrBinary(header, offset, DEVLEN);
+ offset += DEVLEN;
+ devMinor = (int) TarUtils.parseOctalOrBinary(header, offset, DEVLEN);
+ offset += DEVLEN;
+
+ int type = evaluateType(header);
+ switch (type) {
+ case FORMAT_OLDGNU: {
+ offset += ATIMELEN_GNU;
+ offset += CTIMELEN_GNU;
+ offset += OFFSETLEN_GNU;
+ offset += LONGNAMESLEN_GNU;
+ offset += PAD2LEN_GNU;
+ offset += SPARSELEN_GNU;
+ isExtended = TarUtils.parseBoolean(header, offset);
+ offset += ISEXTENDEDLEN_GNU;
+ realSize = TarUtils.parseOctal(header, offset, REALSIZELEN_GNU);
+ offset += REALSIZELEN_GNU;
+ break;
+ }
+ case FORMAT_POSIX:
+ default: {
+ String prefix = oldStyle
+ ? TarUtils.parseName(header, offset, PREFIXLEN)
+ : TarUtils.parseName(header, offset, PREFIXLEN, encoding);
+ // SunOS tar -E does not add / to directory names, so fix
+ // up to be consistent
+ if (isDirectory() && !name.endsWith("/")){
+ name = name + "/";
+ }
+ if (prefix.length() > 0){
+ name = prefix + "/" + name;
+ }
+ }
+ }
+ }
+
+ /**
+ * Strips Windows' drive letter as well as any leading slashes, and
+ * turns path separators into forward slashes.
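+ * For example, on Windows {@code "C:\\data\\file.txt"} becomes
+ * {@code "data/file.txt"}.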
+ */
+ private static String normalizeFileName(String fileName,
+ boolean preserveLeadingSlashes) {
+ String osname = System.getProperty("os.name").toLowerCase(Locale.ENGLISH);
+
+ if (osname != null) {
+
+ // Strip off drive letters!
+ // REVIEW Would a better check be "(File.separator == '\')"?
+
+ if (osname.startsWith("windows")) {
+ if (fileName.length() > 2) {
+ char ch1 = fileName.charAt(0);
+ char ch2 = fileName.charAt(1);
+
+ if (ch2 == ':'
+ && (ch1 >= 'a' && ch1 <= 'z'
+ || ch1 >= 'A' && ch1 <= 'Z')) {
+ fileName = fileName.substring(2);
+ }
+ }
+ } else if (osname.indexOf("netware") > -1) {
+ int colon = fileName.indexOf(':');
+ if (colon != -1) {
+ fileName = fileName.substring(colon + 1);
+ }
+ }
+ }
+
+ fileName = fileName.replace(File.separatorChar, '/');
+
+ // No absolute pathnames
+ // Windows (and Posix?) paths can start with "\\NetworkDrive\",
+ // so we loop on starting /'s.
+ while (!preserveLeadingSlashes && fileName.startsWith("/")) {
+ fileName = fileName.substring(1);
+ }
+ return fileName;
+ }
+
+ /**
+ * Evaluate an entry's header format from a header buffer.
+ *
+ * @param header The tar entry header buffer to evaluate the format for.
+ * @return format type
+ */
+ private int evaluateType(byte[] header) {
+ if (ArchiveUtils.matchAsciiBuffer(MAGIC_GNU, header, MAGIC_OFFSET, MAGICLEN)) {
+ return FORMAT_OLDGNU;
+ }
+ if (ArchiveUtils.matchAsciiBuffer(MAGIC_POSIX, header, MAGIC_OFFSET, MAGICLEN)) {
+ return FORMAT_POSIX;
+ }
+ return 0;
+ }
+}
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
new file mode 100644
index 000000000..2f4ce6eab
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
@@ -0,0 +1,685 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*
+ * This package is based on the work done by Timothy Gerard Endres
+ * (time@ice.com) to whom the Ant project is very grateful for his great code.
+ */
+
+package org.apache.commons.compress.archivers.tar;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
+import org.apache.commons.compress.utils.ArchiveUtils;
+import org.apache.commons.compress.utils.CharsetNames;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * The TarInputStream reads a UNIX tar archive as an InputStream.
+ * Methods are provided to position at each successive entry in
+ * the archive, and then read each entry as a normal input stream
+ * using read().
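+ *
+ * A minimal usage sketch (illustrative only; assumes a readable
+ * local file named archive.tar):
+ * <pre>
+ * TarArchiveInputStream in = new TarArchiveInputStream(
+ *     new java.io.FileInputStream("archive.tar"));
+ * TarArchiveEntry entry;
+ * while ((entry = in.getNextTarEntry()) != null) {
+ *     System.out.println(entry.getName() + " " + entry.getSize());
+ * }
+ * in.close();
+ * </pre>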
+ * @NotThreadSafe
+ */
+public class TarArchiveInputStream extends ArchiveInputStream {
+
+ private static final int SMALL_BUFFER_SIZE = 256;
+
+ private final byte[] SMALL_BUF = new byte[SMALL_BUFFER_SIZE];
+
+ /** The size of the TAR header */
+ private final int recordSize;
+
+ /** The size of a block */
+ private final int blockSize;
+
+ /** True if file has hit EOF */
+ private boolean hasHitEOF;
+
+ /** Size of the current entry */
+ private long entrySize;
+
+ /** How far into the entry the stream is */
+ private long entryOffset;
+
+ /** An input stream to read from */
+ private final InputStream is;
+
+ /** The meta-data about the current entry */
+ private TarArchiveEntry currEntry;
+
+ /** The encoding of the file */
+ private final ZipEncoding encoding;
+
+ /**
+ * Constructor for TarArchiveInputStream.
+ * @param is the input stream to use
+ */
+ public TarArchiveInputStream(InputStream is) {
+ this(is, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE);
+ }
+
+ /**
+ * Constructor for TarArchiveInputStream.
+ * @param is the input stream to use
+ * @param encoding name of the encoding to use for file names
+ * @since 1.4
+ */
+ public TarArchiveInputStream(InputStream is, String encoding) {
+ this(is, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE,
+ encoding);
+ }
+
+ /**
+ * Constructor for TarArchiveInputStream.
+ * @param is the input stream to use
+ * @param blockSize the block size to use
+ */
+ public TarArchiveInputStream(InputStream is, int blockSize) {
+ this(is, blockSize, TarConstants.DEFAULT_RCDSIZE);
+ }
+
+ /**
+ * Constructor for TarArchiveInputStream.
+ * @param is the input stream to use
+ * @param blockSize the block size to use
+ * @param encoding name of the encoding to use for file names
+ * @since 1.4
+ */
+ public TarArchiveInputStream(InputStream is, int blockSize,
+ String encoding) {
+ this(is, blockSize, TarConstants.DEFAULT_RCDSIZE, encoding);
+ }
+
+ /**
+ * Constructor for TarArchiveInputStream.
+ * @param is the input stream to use
+ * @param blockSize the block size to use
+ * @param recordSize the record size to use
+ */
+ public TarArchiveInputStream(InputStream is, int blockSize, int recordSize) {
+ this(is, blockSize, recordSize, null);
+ }
+
+ /**
+ * Constructor for TarArchiveInputStream.
+ * @param is the input stream to use
+ * @param blockSize the block size to use
+ * @param recordSize the record size to use
+ * @param encoding name of the encoding to use for file names
+ * @since 1.4
+ */
+ public TarArchiveInputStream(InputStream is, int blockSize, int recordSize,
+ String encoding) {
+ this.is = is;
+ this.hasHitEOF = false;
+ this.encoding = ZipEncodingHelper.getZipEncoding(encoding);
+ this.recordSize = recordSize;
+ this.blockSize = blockSize;
+ }
+
+ /**
+ * Closes this stream. Calls the TarBuffer's close() method.
+ * @throws IOException on error
+ */
+ @Override
+ public void close() throws IOException {
+ is.close();
+ }
+
+ /**
+ * Get the record size being used by this stream's buffer.
+ *
+ * @return The TarBuffer record size.
+ */
+ public int getRecordSize() {
+ return recordSize;
+ }
+
+ /**
+ * Get the available data that can be read from the current
+ * entry in the archive. This does not indicate how much data
+ * is left in the entire archive, only in the current entry.
+ * This value is determined from the entry's size header field
+ * and the amount of data already read from the current entry.
+ * Integer.MAX_VALUE is returned in case more than Integer.MAX_VALUE
+ * bytes are left in the current entry in the archive.
+ *
+ * @return The number of available bytes for the current entry.
+ * @throws IOException for signature
+ */
+ @Override
+ public int available() throws IOException {
+ if (entrySize - entryOffset > Integer.MAX_VALUE) {
+ return Integer.MAX_VALUE;
+ }
+ return (int) (entrySize - entryOffset);
+ }
+
+ /**
+ * Skip bytes in the input buffer. This skips bytes in the
+ * current entry's data, not the entire archive, and will
+ * stop at the end of the current entry's data if the number
+ * to skip extends beyond that point.
+ *
+ * @param numToSkip The number of bytes to skip.
+ * @return the number actually skipped
+ * @throws IOException on error
+ */
+ @Override
+ public long skip(long numToSkip) throws IOException {
+
+ long available = entrySize - entryOffset;
+ numToSkip = Math.min(numToSkip, available);
+
+ long skipped = IOUtils.skip(is, numToSkip);
+ count(skipped);
+ entryOffset += skipped;
+ return skipped;
+ }
+
+ /**
+ * Since we do not support marking just yet, we do nothing.
+ */
+ @Override
+ public synchronized void reset() {
+ }
+
+ /**
+ * Get the next entry in this tar archive. This will skip
+ * over any remaining data in the current entry, if there
+ * is one, and place the input stream at the header of the
+ * next entry, and read the header and instantiate a new
+ * TarEntry from the header bytes and return that entry.
+ * If there are no more entries in the archive, null will
+ * be returned to indicate that the end of the archive has
+ * been reached.
+ *
+ * @return The next TarEntry in the archive, or null.
+ * @throws IOException on error
+ */
+ public TarArchiveEntry getNextTarEntry() throws IOException {
+ if (hasHitEOF) {
+ return null;
+ }
+
+ if (currEntry != null) {
+ /* Skip will only go to the end of the current entry */
+ skip(Long.MAX_VALUE);
+
+ /* skip to the end of the last record */
+ skipRecordPadding();
+ }
+
+ byte[] headerBuf = getRecord();
+
+ if (headerBuf == null) {
+ /* hit EOF */
+ currEntry = null;
+ return null;
+ }
+
+ try {
+ currEntry = new TarArchiveEntry(headerBuf, encoding);
+ } catch (IllegalArgumentException e) {
+ IOException ioe = new IOException("Error detected parsing the header");
+ ioe.initCause(e);
+ throw ioe;
+ }
+
+ entryOffset = 0;
+ entrySize = currEntry.getSize();
+
+ if (currEntry.isGNULongLinkEntry()) {
+ byte[] longLinkData = getLongNameData();
+ if (longLinkData == null) {
+ // Bugzilla: 40334
+ // Malformed tar file - long link entry name not followed by
+ // entry
+ return null;
+ }
+ currEntry.setLinkName(encoding.decode(longLinkData));
+ }
+
+ if (currEntry.isGNULongNameEntry()) {
+ byte[] longNameData = getLongNameData();
+ if (longNameData == null) {
+ // Bugzilla: 40334
+ // Malformed tar file - long entry name not followed by
+ // entry
+ return null;
+ }
+ currEntry.setName(encoding.decode(longNameData));
+ }
+
+ if (currEntry.isPaxHeader()){ // Process Pax headers
+ paxHeaders();
+ }
+
+ if (currEntry.isGNUSparse()){ // Process sparse files
+ readGNUSparse();
+ }
+
+ // If the size of the next element in the archive has changed
+ // due to a new size being reported in the posix header
+ // information, we update entrySize here so that it contains
+ // the correct value.
+ entrySize = currEntry.getSize();
+
+ return currEntry;
+ }
+
+ /**
+ * The last record block should be written at the full size, so skip any
+ * additional space used to fill a record after an entry.
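+ * (For example, a 10-byte entry stored in 512-byte records is
+ * followed by 502 bytes of padding.)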
+ */
+ private void skipRecordPadding() throws IOException {
+ if (this.entrySize > 0 && this.entrySize % this.recordSize != 0) {
+ long numRecords = (this.entrySize / this.recordSize) + 1;
+ long padding = (numRecords * this.recordSize) - this.entrySize;
+ long skipped = IOUtils.skip(is, padding);
+ count(skipped);
+ }
+ }
+
+ /**
+ * Get the next entry in this tar archive as longname data.
+ *
+ * @return The next entry in the archive as longname data, or null.
+ * @throws IOException on error
+ */
+ protected byte[] getLongNameData() throws IOException {
+ // read in the name
+ ByteArrayOutputStream longName = new ByteArrayOutputStream();
+ int length = 0;
+ while ((length = read(SMALL_BUF)) >= 0) {
+ longName.write(SMALL_BUF, 0, length);
+ }
+ getNextEntry();
+ if (currEntry == null) {
+ // Bugzilla: 40334
+ // Malformed tar file - long entry name not followed by entry
+ return null;
+ }
+ byte[] longNameData = longName.toByteArray();
+ // remove trailing null terminator(s)
+ length = longNameData.length;
+ while (length > 0 && longNameData[length - 1] == 0) {
+ --length;
+ }
+ if (length != longNameData.length) {
+ byte[] l = new byte[length];
+ System.arraycopy(longNameData, 0, l, 0, length);
+ longNameData = l;
+ }
+ return longNameData;
+ }
+
+ /**
+ * Get the next record in this tar archive. This will skip
+ * over any remaining data in the current entry, if there
+ * is one, and place the input stream at the header of the
+ * next entry.
+ *
+ * If there are no more entries in the archive, null will be
+ * returned to indicate that the end of the archive has been
+ * reached. At the same time the {@code hasHitEOF} marker will be
+ * set to true.
+ *
+ * @return The next header in the archive, or null.
+ * @throws IOException on error
+ */
+ private byte[] getRecord() throws IOException {
+ byte[] headerBuf = readRecord();
+ hasHitEOF = isEOFRecord(headerBuf);
+ if (hasHitEOF && headerBuf != null) {
+ tryToConsumeSecondEOFRecord();
+ consumeRemainderOfLastBlock();
+ headerBuf = null;
+ }
+ return headerBuf;
+ }
+
+ /**
+ * Determine if an archive record indicates End of Archive. End of
+ * archive is indicated by a record that consists entirely of null bytes.
+ *
+ * @param record The record data to check.
+ * @return true if the record data is an End of Archive
+ */
+ protected boolean isEOFRecord(byte[] record) {
+ return record == null || ArchiveUtils.isArrayZero(record, recordSize);
+ }
+
+ /**
+ * Read a record from the input stream and return the data.
+ *
+ * @return The record data or null if EOF has been hit.
+ * @throws IOException on error
+ */
+ protected byte[] readRecord() throws IOException {
+
+ byte[] record = new byte[recordSize];
+
+ int readNow = IOUtils.readFully(is, record);
+ count(readNow);
+ if (readNow != recordSize) {
+ return null;
+ }
+
+ return record;
+ }
+
+ private void paxHeaders() throws IOException{
+ Map<String, String> headers = parsePaxHeaders(this);
+ getNextEntry(); // Get the actual file entry
+ applyPaxHeadersToCurrentEntry(headers);
+ }
+
+ Map<String, String> parsePaxHeaders(InputStream i) throws IOException {
+ Map<String, String> headers = new HashMap<String, String>();
+ // Format is "length keyword=value\n";
+ while(true){ // get length
+ int ch;
+ int len = 0;
+ int read = 0;
+ while((ch = i.read()) != -1) {
+ read++;
+ if (ch == ' '){ // End of length string
+ // Get keyword
+ ByteArrayOutputStream coll = new ByteArrayOutputStream();
+ while((ch = i.read()) != -1) {
+ read++;
+ if (ch == '='){ // end of keyword
+ String keyword = coll.toString(CharsetNames.UTF_8);
+ // Get rest of entry
+ byte[] rest = new byte[len - read];
+ int got = i.read(rest);
+ if (got != len - read){
+ throw new IOException("Failed to read "
+ + "Paxheader. Expected "
+ + (len - read)
+ + " bytes, read "
+ + got);
+ }
+ // Drop trailing NL
+ String value = new String(rest, 0,
+ len - read - 1, CharsetNames.UTF_8);
+ headers.put(keyword, value);
+ break;
+ }
+ coll.write((byte) ch);
+ }
+ break; // Processed single header
+ }
+ len *= 10;
+ len += ch - '0';
+ }
+ if (ch == -1){ // EOF
+ break;
+ }
+ }
+ return headers;
+ }
+
+ private void applyPaxHeadersToCurrentEntry(Map<String, String> headers) {
+ /*
+ * The following headers are defined for Pax.
+ * atime, ctime, charset: cannot use these without changing TarArchiveEntry fields
+ * mtime
+ * comment
+ * gid, gname
+ * linkpath
+ * size
+ * uid,uname
+ * SCHILY.devminor, SCHILY.devmajor: don't have setters/getters for those
+ */
+ for (Entry<String, String> ent : headers.entrySet()){
+ String key = ent.getKey();
+ String val = ent.getValue();
+ if ("path".equals(key)){
+ currEntry.setName(val);
+ } else if ("linkpath".equals(key)){
+ currEntry.setLinkName(val);
+ } else if ("gid".equals(key)){
+ currEntry.setGroupId(Integer.parseInt(val));
+ } else if ("gname".equals(key)){
+ currEntry.setGroupName(val);
+ } else if ("uid".equals(key)){
+ currEntry.setUserId(Integer.parseInt(val));
+ } else if ("uname".equals(key)){
+ currEntry.setUserName(val);
+ } else if ("size".equals(key)){
+ currEntry.setSize(Long.parseLong(val));
+ } else if ("mtime".equals(key)){
+ currEntry.setModTime((long) (Double.parseDouble(val) * 1000));
+ } else if ("SCHILY.devminor".equals(key)){
+ currEntry.setDevMinor(Integer.parseInt(val));
+ } else if ("SCHILY.devmajor".equals(key)){
+ currEntry.setDevMajor(Integer.parseInt(val));
+ }
+ }
+ }
+
+ /**
+ * Adds the sparse chunks from the current entry to the sparse chunks,
+ * including any additional sparse entries following the current entry.
+ *
+ * @throws IOException on error
+ *
+ * @todo Sparse files are not really processed yet.
+ */
+ private void readGNUSparse() throws IOException {
+ /* we do not really process sparse files yet
+ sparses = new ArrayList();
+ sparses.addAll(currEntry.getSparses());
+ */
+ if (currEntry.isExtended()) {
+ TarArchiveSparseEntry entry;
+ do {
+ byte[] headerBuf = getRecord();
+ if (headerBuf == null) {
+ currEntry = null;
+ break;
+ }
+ entry = new TarArchiveSparseEntry(headerBuf);
+ /* we do not really process sparse files yet
+ sparses.addAll(entry.getSparses());
+ */
+ } while (entry.isExtended());
+ }
+ }
+
+ /**
+ * Returns the next Archive Entry in this Stream.
+ *
+ * @return the next entry,
+ * or {@code null} if there are no more entries
+ * @throws IOException if the next entry could not be read
+ */
+ @Override
+ public ArchiveEntry getNextEntry() throws IOException {
+ return getNextTarEntry();
+ }
+
+ /**
+ * Tries to read the next record, rewinding the stream if it is not an EOF record.
+ *
+ * This is meant to protect against cases where a tar
+ * implementation has written only one EOF record when two are
+ * expected. Actually this won't help since a non-conforming
+ * implementation likely won't fill full blocks consisting of - by
+ * default - ten records either so we probably have already read
+ * beyond the archive anyway.
+ */
+ private void tryToConsumeSecondEOFRecord() throws IOException {
+ boolean shouldReset = true;
+ boolean marked = is.markSupported();
+ if (marked) {
+ is.mark(recordSize);
+ }
+ try {
+ shouldReset = !isEOFRecord(readRecord());
+ } finally {
+ if (shouldReset && marked) {
+ pushedBackBytes(recordSize);
+ is.reset();
+ }
+ }
+ }
+
+ /**
+ * Reads bytes from the current tar archive entry.
+ *
+ * This method is aware of the boundaries of the current
+ * entry in the archive and will deal with them as if they
+ * were this stream's start and EOF.
+ *
+ * @param buf The buffer into which to place bytes read.
+ * @param offset The offset at which to place bytes read.
+ * @param numToRead The number of bytes to read.
+ * @return The number of bytes read, or -1 at EOF.
+ * @throws IOException on error
+ */
+ @Override
+ public int read(byte[] buf, int offset, int numToRead) throws IOException {
+ int totalRead = 0;
+
+ if (hasHitEOF || entryOffset >= entrySize) {
+ return -1;
+ }
+
+ numToRead = Math.min(numToRead, available());
+
+ totalRead = is.read(buf, offset, numToRead);
+ count(totalRead);
+
+ if (totalRead == -1) {
+ hasHitEOF = true;
+ } else {
+ entryOffset += totalRead;
+ }
+
+ return totalRead;
+ }
+
+ /**
+ * Whether this class is able to read the given entry.
+ *
+ * May return false if the given entry is a sparse file.
+ */
+ @Override
+ public boolean canReadEntryData(ArchiveEntry ae) {
+ if (ae instanceof TarArchiveEntry) {
+ TarArchiveEntry te = (TarArchiveEntry) ae;
+ return !te.isGNUSparse();
+ }
+ return false;
+ }
+
+ /**
+ * Get the current TAR Archive Entry that this input stream is processing
+ *
+ * @return The current Archive Entry
+ */
+ public TarArchiveEntry getCurrentEntry() {
+ return currEntry;
+ }
+
+ protected final void setCurrentEntry(TarArchiveEntry e) {
+ currEntry = e;
+ }
+
+ protected final boolean isAtEOF() {
+ return hasHitEOF;
+ }
+
+ protected final void setAtEOF(boolean b) {
+ hasHitEOF = b;
+ }
+
+ /**
+ * This method is invoked once the end of the archive is hit, it
+ * tries to consume the remaining bytes under the assumption that
+ * the tool creating this archive has padded the last block.
+ */
+ private void consumeRemainderOfLastBlock() throws IOException {
+ long bytesReadOfLastBlock = getBytesRead() % blockSize;
+ if (bytesReadOfLastBlock > 0) {
+ long skipped = IOUtils.skip(is, blockSize - bytesReadOfLastBlock);
+ count(skipped);
+ }
+ }
+
+ /**
+ * Checks if the signature matches what is expected for a tar file.
+ *
+ * @param signature
+ * the bytes to check
+ * @param length
+ * the number of bytes to check
+ * @return true, if this stream is a tar archive stream, false otherwise
+ */
+ public static boolean matches(byte[] signature, int length) {
+ if (length < TarConstants.VERSION_OFFSET+TarConstants.VERSIONLEN) {
+ return false;
+ }
+
+ if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_POSIX,
+ signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN)
+ &&
+ ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_POSIX,
+ signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
+ ){
+ return true;
+ }
+ if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_GNU,
+ signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN)
+ &&
+ (
+ ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_SPACE,
+ signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
+ ||
+ ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_ZERO,
+ signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
+ )
+ ){
+ return true;
+ }
+ // COMPRESS-107 - recognise Ant tar files
+ if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_ANT,
+ signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN)
+ &&
+ ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_ANT,
+ signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
+ ){
+ return true;
+ }
+ return false;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java
new file mode 100644
index 000000000..8dbc4aaf3
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java
@@ -0,0 +1,666 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.tar;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
+import org.apache.commons.compress.utils.CharsetNames;
+import org.apache.commons.compress.utils.CountingOutputStream;
+
+/**
+ * The TarOutputStream writes a UNIX tar archive as an OutputStream.
+ * Methods are provided to put entries, and then write their contents
+ * by writing to this stream using write().
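+ *
+ * A minimal usage sketch (illustrative only; assumes a readable
+ * local file named hello.txt):
+ * <pre>
+ * TarArchiveOutputStream out = new TarArchiveOutputStream(
+ *     new java.io.FileOutputStream("archive.tar"));
+ * File src = new File("hello.txt");
+ * ArchiveEntry entry = out.createArchiveEntry(src, src.getName());
+ * out.putArchiveEntry(entry);
+ * // write exactly entry-size bytes via out.write(...), then:
+ * out.closeArchiveEntry();
+ * out.close();
+ * </pre>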
+ * @NotThreadSafe
+ */
+public class TarArchiveOutputStream extends ArchiveOutputStream {
+ /** Fail if a long file name is required in the archive. */
+ public static final int LONGFILE_ERROR = 0;
+
+ /** Long paths will be truncated in the archive. */
+ public static final int LONGFILE_TRUNCATE = 1;
+
+ /** GNU tar extensions are used to store long file names in the archive. */
+ public static final int LONGFILE_GNU = 2;
+
+ /** POSIX/PAX extensions are used to store long file names in the archive. */
+ public static final int LONGFILE_POSIX = 3;
+
+ /** Fail if a big number (e.g. size > 8GiB) is required in the archive. */
+ public static final int BIGNUMBER_ERROR = 0;
+
+ /** star/GNU tar/BSD tar extensions are used to store big numbers in the archive. */
+ public static final int BIGNUMBER_STAR = 1;
+
+ /** POSIX/PAX extensions are used to store big numbers in the archive. */
+ public static final int BIGNUMBER_POSIX = 2;
+
+ private long currSize;
+ private String currName;
+ private long currBytes;
+ private final byte[] recordBuf;
+ private int assemLen;
+ private final byte[] assemBuf;
+ private int longFileMode = LONGFILE_ERROR;
+ private int bigNumberMode = BIGNUMBER_ERROR;
+ private int recordsWritten;
+ private final int recordsPerBlock;
+ private final int recordSize;
+
+ private boolean closed = false;
+
+ /** Indicates if putArchiveEntry has been called without closeArchiveEntry */
+ private boolean haveUnclosedEntry = false;
+
+ /** indicates if this archive is finished */
+ private boolean finished = false;
+
+ private final OutputStream out;
+
+ private final ZipEncoding encoding;
+
+ private boolean addPaxHeadersForNonAsciiNames = false;
+ private static final ZipEncoding ASCII =
+ ZipEncodingHelper.getZipEncoding("ASCII");
+
+ /**
+ * Constructor for TarArchiveOutputStream.
+ * @param os the output stream to use
+ */
+ public TarArchiveOutputStream(OutputStream os) {
+ this(os, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE);
+ }
+
+ /**
+ * Constructor for TarArchiveOutputStream.
+ * @param os the output stream to use
+ * @param encoding name of the encoding to use for file names
+ * @since 1.4
+ */
+ public TarArchiveOutputStream(OutputStream os, String encoding) {
+ this(os, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE, encoding);
+ }
+
+ /**
+ * Constructor for TarArchiveOutputStream.
+ * @param os the output stream to use
+ * @param blockSize the block size to use
+ */
+ public TarArchiveOutputStream(OutputStream os, int blockSize) {
+ this(os, blockSize, TarConstants.DEFAULT_RCDSIZE);
+ }
+
+ /**
+ * Constructor for TarArchiveOutputStream.
+ * @param os the output stream to use
+ * @param blockSize the block size to use
+ * @param encoding name of the encoding to use for file names
+ * @since 1.4
+ */
+ public TarArchiveOutputStream(OutputStream os, int blockSize,
+ String encoding) {
+ this(os, blockSize, TarConstants.DEFAULT_RCDSIZE, encoding);
+ }
+
+ /**
+ * Constructor for TarArchiveOutputStream.
+ * @param os the output stream to use
+ * @param blockSize the block size to use
+ * @param recordSize the record size to use
+ */
+ public TarArchiveOutputStream(OutputStream os, int blockSize, int recordSize) {
+ this(os, blockSize, recordSize, null);
+ }
+
+ /**
+ * Constructor for TarArchiveOutputStream.
+ * @param os the output stream to use
+ * @param blockSize the block size to use
+ * @param recordSize the record size to use
+ * @param encoding name of the encoding to use for file names
+ * @since 1.4
+ */
+ public TarArchiveOutputStream(OutputStream os, int blockSize,
+ int recordSize, String encoding) {
+ out = new CountingOutputStream(os);
+ this.encoding = ZipEncodingHelper.getZipEncoding(encoding);
+
+ this.assemLen = 0;
+ this.assemBuf = new byte[recordSize];
+ this.recordBuf = new byte[recordSize];
+ this.recordSize = recordSize;
+ this.recordsPerBlock = blockSize / recordSize;
+ }
+
+ /**
+ * Set the long file mode.
+ * This can be LONGFILE_ERROR(0), LONGFILE_TRUNCATE(1), LONGFILE_GNU(2) or LONGFILE_POSIX(3).
+ * This specifies the treatment of long file names (names >= TarConstants.NAMELEN).
+ * Default is LONGFILE_ERROR.
+ * @param longFileMode the mode to use
+ */
+ public void setLongFileMode(int longFileMode) {
+ this.longFileMode = longFileMode;
+ }
+
+ /**
+ * Set the big number mode.
+ * This can be BIGNUMBER_ERROR(0), BIGNUMBER_STAR(1) or BIGNUMBER_POSIX(2).
+ * This specifies the treatment of big files (sizes > TarConstants.MAXSIZE) and other numeric values too big to fit into a traditional tar header.
+ * Default is BIGNUMBER_ERROR.
+ * @param bigNumberMode the mode to use
+ * @since 1.4
+ */
+ public void setBigNumberMode(int bigNumberMode) {
+ this.bigNumberMode = bigNumberMode;
+ }
+
+ /**
+ * Whether to add a PAX extension header for non-ASCII file names.
+ * @since 1.4
+ */
+ public void setAddPaxHeadersForNonAsciiNames(boolean b) {
+ addPaxHeadersForNonAsciiNames = b;
+ }
+
+ @Deprecated
+ @Override
+ public int getCount() {
+ return (int) getBytesWritten();
+ }
+
+ @Override
+ public long getBytesWritten() {
+ return ((CountingOutputStream) out).getBytesWritten();
+ }
+
+ /**
+ * Ends the TAR archive without closing the underlying OutputStream.
+ *
+ * An archive consists of a series of file entries terminated by an
+ * end-of-archive entry, which consists of two 512-byte blocks of zero bytes.
+ * POSIX.1 requires two EOF records, as do some other implementations.
+ *
+ * @throws IOException on error
+ */
+ @Override
+ public void finish() throws IOException {
+ if (finished) {
+ throw new IOException("This archive has already been finished");
+ }
+
+ if (haveUnclosedEntry) {
+ throw new IOException("This archives contains unclosed entries.");
+ }
+ writeEOFRecord();
+ writeEOFRecord();
+ padAsNeeded();
+ out.flush();
+ finished = true;
+ }
+
+ /**
+ * Closes the underlying OutputStream.
+ * @throws IOException on error
+ */
+ @Override
+ public void close() throws IOException {
+ if (!finished) {
+ finish();
+ }
+
+ if (!closed) {
+ out.close();
+ closed = true;
+ }
+ }
+
+ /**
+ * Get the record size being used by this stream's TarBuffer.
+ *
+ * @return The TarBuffer record size.
+ */
+ public int getRecordSize() {
+ return this.recordSize;
+ }
+
+ /**
+ * Put an entry on the output stream. This writes the entry's
+ * header record and positions the output stream for writing
+ * the contents of the entry. Once this method is called, the
+ * stream is ready for calls to write() to write the entry's
+ * contents. Once the contents are written, closeArchiveEntry()
+ * MUST be called to ensure that all buffered data
+ * is completely written to the output stream.
+ *
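+ * A sketch of the expected call sequence (illustrative only):
+ * <pre>
+ * out.putArchiveEntry(entry);
+ * out.write(data); // exactly entry.getSize() bytes in total
+ * out.closeArchiveEntry();
+ * </pre>
+ *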
+ * @param archiveEntry The TarEntry to be written to the archive.
+ * @throws IOException on error
+ * @throws ClassCastException if archiveEntry is not an instance of TarArchiveEntry
+ */
+ @Override
+ public void putArchiveEntry(ArchiveEntry archiveEntry) throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ TarArchiveEntry entry = (TarArchiveEntry) archiveEntry;
+ Map<String, String> paxHeaders = new HashMap<String, String>();
+ final String entryName = entry.getName();
+ boolean paxHeaderContainsPath = handleLongName(entryName, paxHeaders, "path",
+ TarConstants.LF_GNUTYPE_LONGNAME, "file name");
+
+ final String linkName = entry.getLinkName();
+ boolean paxHeaderContainsLinkPath = linkName != null && linkName.length() > 0
+ && handleLongName(linkName, paxHeaders, "linkpath",
+ TarConstants.LF_GNUTYPE_LONGLINK, "link name");
+
+ if (bigNumberMode == BIGNUMBER_POSIX) {
+ addPaxHeadersForBigNumbers(paxHeaders, entry);
+ } else if (bigNumberMode != BIGNUMBER_STAR) {
+ failForBigNumbers(entry);
+ }
+
+ if (addPaxHeadersForNonAsciiNames && !paxHeaderContainsPath
+ && !ASCII.canEncode(entryName)) {
+ paxHeaders.put("path", entryName);
+ }
+
+ if (addPaxHeadersForNonAsciiNames && !paxHeaderContainsLinkPath
+ && (entry.isLink() || entry.isSymbolicLink())
+ && !ASCII.canEncode(linkName)) {
+ paxHeaders.put("linkpath", linkName);
+ }
+
+ if (paxHeaders.size() > 0) {
+ writePaxHeaders(entryName, paxHeaders);
+ }
+
+ entry.writeEntryHeader(recordBuf, encoding,
+ bigNumberMode == BIGNUMBER_STAR);
+ writeRecord(recordBuf);
+
+ currBytes = 0;
+
+ if (entry.isDirectory()) {
+ currSize = 0;
+ } else {
+ currSize = entry.getSize();
+ }
+ currName = entryName;
+ haveUnclosedEntry = true;
+ }
+
+ /**
+ * Close an entry. This method MUST be called for all file
+ * entries that contain data. The reason is that we must
+ * buffer data written to the stream in order to satisfy
+ * the buffer's record based writes. Thus, there may be
+ * data fragments still being assembled that must be written
+ * to the output stream before this entry is closed and the
+ * next entry written.
+ * @throws IOException on error
+ */
+ @Override
+ public void closeArchiveEntry() throws IOException {
+ if (finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ if (!haveUnclosedEntry){
+ throw new IOException("No current entry to close");
+ }
+ if (assemLen > 0) {
+ for (int i = assemLen; i < assemBuf.length; ++i) {
+ assemBuf[i] = 0;
+ }
+
+ writeRecord(assemBuf);
+
+ currBytes += assemLen;
+ assemLen = 0;
+ }
+
+ if (currBytes < currSize) {
+ throw new IOException("entry '" + currName + "' closed at '"
+ + currBytes
+ + "' before the '" + currSize
+ + "' bytes specified in the header were written");
+ }
+ haveUnclosedEntry = false;
+ }
+
+ /**
+ * Writes bytes to the current tar archive entry. This method
+ * is aware of the current entry and will throw an exception if
+ * you attempt to write bytes past the length specified for the
+ * current entry. The method is also (painfully) aware of the
+ * record buffering required by TarBuffer, and manages buffers
+ * that are not a multiple of recordsize in length, including
+ * assembling records from small buffers.
+ *
+ * @param wBuf The buffer to write to the archive.
+ * @param wOffset The offset in the buffer from which to get bytes.
+ * @param numToWrite The number of bytes to write.
+ * @throws IOException on error
+ */
+ @Override
+ public void write(byte[] wBuf, int wOffset, int numToWrite) throws IOException {
+ if (currBytes + numToWrite > currSize) {
+ throw new IOException("request to write '" + numToWrite
+ + "' bytes exceeds size in header of '"
+ + currSize + "' bytes for entry '"
+ + currName + "'");
+ }
+
+ //
+ // We have to deal with assembly!!!
+ // The programmer can be writing little 32 byte chunks for all
+ // we know, and we must assemble complete records for writing.
+ // REVIEW Maybe this should be in TarBuffer? Could that help to
+ // eliminate some of the buffer copying.
+ //
+
+ if (assemLen > 0) {
+ if (assemLen + numToWrite >= recordBuf.length) {
+ int aLen = recordBuf.length - assemLen;
+
+ System.arraycopy(assemBuf, 0, recordBuf, 0,
+ assemLen);
+ System.arraycopy(wBuf, wOffset, recordBuf,
+ assemLen, aLen);
+ writeRecord(recordBuf);
+
+ currBytes += recordBuf.length;
+ wOffset += aLen;
+ numToWrite -= aLen;
+ assemLen = 0;
+ } else {
+ System.arraycopy(wBuf, wOffset, assemBuf, assemLen,
+ numToWrite);
+
+ wOffset += numToWrite;
+ assemLen += numToWrite;
+ numToWrite = 0;
+ }
+ }
+
+ //
+ // When we get here we have EITHER:
+ // o An empty "assemble" buffer.
+ // o No bytes to write (numToWrite == 0)
+ //
+ while (numToWrite > 0) {
+ if (numToWrite < recordBuf.length) {
+ System.arraycopy(wBuf, wOffset, assemBuf, assemLen,
+ numToWrite);
+
+ assemLen += numToWrite;
+
+ break;
+ }
+
+ writeRecord(wBuf, wOffset);
+
+ int num = recordBuf.length;
+
+ currBytes += num;
+ numToWrite -= num;
+ wOffset += num;
+ }
+ }
+
+ /**
+ * Writes a PAX extended header with the given map as contents.
+ * @since 1.4
+ */
+ void writePaxHeaders(String entryName,
+ Map<String, String> headers) throws IOException {
+ String name = "./PaxHeaders.X/" + stripTo7Bits(entryName);
+ if (name.length() >= TarConstants.NAMELEN) {
+ name = name.substring(0, TarConstants.NAMELEN - 1);
+ }
+ while (name.endsWith("/")) {
+ // TarEntry's constructor would think this is a directory
+ // and not allow any data to be written
+ name = name.substring(0, name.length() - 1);
+ }
+ TarArchiveEntry pex = new TarArchiveEntry(name,
+ TarConstants.LF_PAX_EXTENDED_HEADER_LC);
+
+ StringWriter w = new StringWriter();
+ for (Map.Entry<String, String> h : headers.entrySet()) {
+ String key = h.getKey();
+ String value = h.getValue();
+ int len = key.length() + value.length()
+ + 3 /* blank, equals and newline */
+ + 2 /* guess 9 < actual length < 100 */;
+ String line = len + " " + key + "=" + value + "\n";
+ int actualLength = line.getBytes(CharsetNames.UTF_8).length;
+ while (len != actualLength) {
+ // Adjust for cases where length < 10 or > 100
+ // or where UTF-8 encoding isn't a single octet
+ // per character.
+ // Must be in loop as size may go from 99 to 100 in
+ // first pass so we'd need a second.
+ len = actualLength;
+ line = len + " " + key + "=" + value + "\n";
+ actualLength = line.getBytes(CharsetNames.UTF_8).length;
+ }
+ w.write(line);
+ }
+ byte[] data = w.toString().getBytes(CharsetNames.UTF_8);
+ pex.setSize(data.length);
+ putArchiveEntry(pex);
+ write(data);
+ closeArchiveEntry();
+ }
+
+ private String stripTo7Bits(String name) {
+ final int length = name.length();
+ StringBuilder result = new StringBuilder(length);
+ for (int i = 0; i < length; i++) {
+ char stripped = (char) (name.charAt(i) & 0x7F);
+ if (stripped != 0) { // would be read as Trailing null
+ result.append(stripped);
+ }
+ }
+ return result.toString();
+ }
+
+ /**
+ * Write an EOF (end of archive) record to the tar archive.
+ * An EOF record consists of a record of all zeros.
+ */
+ private void writeEOFRecord() throws IOException {
+ Arrays.fill(recordBuf, (byte) 0);
+ writeRecord(recordBuf);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public ArchiveEntry createArchiveEntry(File inputFile, String entryName)
+ throws IOException {
+ if(finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ return new TarArchiveEntry(inputFile, entryName);
+ }
+
+ /**
+ * Write an archive record to the archive.
+ *
+ * @param record The record data to write to the archive.
+ * @throws IOException on error
+ */
+ private void writeRecord(byte[] record) throws IOException {
+ if (record.length != recordSize) {
+ throw new IOException("record to write has length '"
+ + record.length
+ + "' which is not the record size of '"
+ + recordSize + "'");
+ }
+
+ out.write(record);
+ recordsWritten++;
+ }
+
+ /**
+ * Write an archive record to the archive, where the record may be
+ * inside of a larger array buffer. The buffer must be "offset plus
+ * record size" long.
+ *
+ * @param buf The buffer containing the record data to write.
+ * @param offset The offset of the record data within buf.
+ * @throws IOException on error
+ */
+ private void writeRecord(byte[] buf, int offset) throws IOException {
+
+ if (offset + recordSize > buf.length) {
+ throw new IOException("record has length '" + buf.length
+ + "' with offset '" + offset
+ + "' which is less than the record size of '"
+ + recordSize + "'");
+ }
+
+ out.write(buf, offset, recordSize);
+ recordsWritten++;
+ }
+
+ private void padAsNeeded() throws IOException {
+ int start = recordsWritten % recordsPerBlock;
+ if (start != 0) {
+ for (int i = start; i < recordsPerBlock; i++) {
+ writeEOFRecord();
+ }
+ }
+ }
+
+ private void addPaxHeadersForBigNumbers(Map<String, String> paxHeaders,
+ TarArchiveEntry entry) {
+ addPaxHeaderForBigNumber(paxHeaders, "size", entry.getSize(),
+ TarConstants.MAXSIZE);
+ addPaxHeaderForBigNumber(paxHeaders, "gid", entry.getGroupId(),
+ TarConstants.MAXID);
+ addPaxHeaderForBigNumber(paxHeaders, "mtime",
+ entry.getModTime().getTime() / 1000,
+ TarConstants.MAXSIZE);
+ addPaxHeaderForBigNumber(paxHeaders, "uid", entry.getUserId(),
+ TarConstants.MAXID);
+ // star extensions by J\u00f6rg Schilling
+ addPaxHeaderForBigNumber(paxHeaders, "SCHILY.devmajor",
+ entry.getDevMajor(), TarConstants.MAXID);
+ addPaxHeaderForBigNumber(paxHeaders, "SCHILY.devminor",
+ entry.getDevMinor(), TarConstants.MAXID);
+ // there is no PAX header for file mode
+ failForBigNumber("mode", entry.getMode(), TarConstants.MAXID);
+ }
+
+ private void addPaxHeaderForBigNumber(Map<String, String> paxHeaders,
+ String header, long value,
+ long maxValue) {
+ if (value < 0 || value > maxValue) {
+ paxHeaders.put(header, String.valueOf(value));
+ }
+ }
+
+ private void failForBigNumbers(TarArchiveEntry entry) {
+ failForBigNumber("entry size", entry.getSize(), TarConstants.MAXSIZE);
+ failForBigNumber("group id", entry.getGroupId(), TarConstants.MAXID);
+ failForBigNumber("last modification time",
+ entry.getModTime().getTime() / 1000,
+ TarConstants.MAXSIZE);
+ failForBigNumber("user id", entry.getUserId(), TarConstants.MAXID);
+ failForBigNumber("mode", entry.getMode(), TarConstants.MAXID);
+ failForBigNumber("major device number", entry.getDevMajor(),
+ TarConstants.MAXID);
+ failForBigNumber("minor device number", entry.getDevMinor(),
+ TarConstants.MAXID);
+ }
+
+ private void failForBigNumber(String field, long value, long maxValue) {
+ if (value < 0 || value > maxValue) {
+ throw new RuntimeException(field + " '" + value
+ + "' is too big ( > "
+ + maxValue + " )");
+ }
+ }
+
+ /**
+ * Handles long file or link names according to the longFileMode setting.
+ *
+ * That is, if the given name is too long to be written to a plain
+ * tar header, then
+ *
+ * - it creates a pax header whose name is given by the
+ * paxHeaderName parameter if longFileMode is POSIX
+ * - it creates a GNU longlink entry whose type is given by
+ * the linkType parameter if longFileMode is GNU
+ * - it throws an exception if longFileMode is ERROR
+ * - it truncates the name if longFileMode is TRUNCATE
+ *
+ * @param name the name to write
+ * @param paxHeaders current map of pax headers
+ * @param paxHeaderName name of the pax header to write
+ * @param linkType type of the GNU entry to write
+ * @param fieldName the name of the field
+ * @return whether a pax header has been written.
+ */
+ private boolean handleLongName(String name,
+ Map<String, String> paxHeaders,
+ String paxHeaderName, byte linkType, String fieldName)
+ throws IOException {
+ final ByteBuffer encodedName = encoding.encode(name);
+ final int len = encodedName.limit() - encodedName.position();
+ if (len >= TarConstants.NAMELEN) {
+
+ if (longFileMode == LONGFILE_POSIX) {
+ paxHeaders.put(paxHeaderName, name);
+ return true;
+ } else if (longFileMode == LONGFILE_GNU) {
+ // create a TarEntry for the LongLink, the contents
+ // of which are the link's name
+ TarArchiveEntry longLinkEntry = new TarArchiveEntry(TarConstants.GNU_LONGLINK, linkType);
+
+ longLinkEntry.setSize(len + 1); // +1 for NUL
+ putArchiveEntry(longLinkEntry);
+ write(encodedName.array(), encodedName.arrayOffset(), len);
+ write(0); // NUL terminator
+ closeArchiveEntry();
+ } else if (longFileMode != LONGFILE_TRUNCATE) {
+ throw new RuntimeException(fieldName + " '" + name
+ + "' is too long ( > "
+ + TarConstants.NAMELEN + " bytes)");
+ }
+ }
+ return false;
+ }
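+
+ // Illustrative usage sketch (hypothetical stream and name, not part
+ // of this class): with LONGFILE_POSIX an over-long name moves into a
+ // pax header, with LONGFILE_GNU it becomes a ././@LongLink pseudo
+ // entry, LONGFILE_ERROR rejects it, and LONGFILE_TRUNCATE cuts it
+ // down to TarConstants.NAMELEN bytes:
+ //
+ // TarArchiveOutputStream tos = new TarArchiveOutputStream(out);
+ // tos.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
+ // tos.putArchiveEntry(new TarArchiveEntry(nameLongerThan100Bytes));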
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveSparseEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveSparseEntry.java
new file mode 100644
index 000000000..79b36ac8f
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarArchiveSparseEntry.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.tar;
+
+import java.io.IOException;
+
+/**
+ * This class represents a sparse entry in a Tar archive.
+ *
+ *
+ * The C structure for a sparse entry is:
+ *
+ * struct posix_header {
+ * struct sparse sp[21]; // TarConstants.SPARSELEN_GNU_SPARSE - offset 0
+ * char isextended; // TarConstants.ISEXTENDEDLEN_GNU_SPARSE - offset 504
+ * };
+ *
+ * Whereas, "struct sparse" is:
+ *
+ * struct sparse {
+ * char offset[12]; // offset 0
+ * char numbytes[12]; // offset 12
+ * };
+ *
+ */
+
+public class TarArchiveSparseEntry implements TarConstants {
+ /** If an extension sparse header follows. */
+ private final boolean isExtended;
+
+ /**
+ * Construct an entry from an archive's header bytes. File is set
+ * to null.
+ *
+ * @param headerBuf The header bytes from a tar archive entry.
+ * @throws IOException on unknown format
+ */
+ public TarArchiveSparseEntry(byte[] headerBuf) throws IOException {
+ int offset = 0;
+ offset += SPARSELEN_GNU_SPARSE;
+ isExtended = TarUtils.parseBoolean(headerBuf, offset);
+ }
+
+ public boolean isExtended() {
+ return isExtended;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarConstants.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarConstants.java
new file mode 100644
index 000000000..000d12654
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarConstants.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.tar;
+
+/**
+ * This interface contains all the definitions used in the package.
+ *
+ * For tar formats (FORMAT_OLDGNU, FORMAT_POSIX, etc.) see the
+ * enum archive_format type in GNU tar's tar.h
+ */
+// CheckStyle:InterfaceIsTypeCheck OFF (bc)
+public interface TarConstants {
+
+ /** Default record size */
+ int DEFAULT_RCDSIZE = 512;
+
+ /** Default block size */
+ int DEFAULT_BLKSIZE = DEFAULT_RCDSIZE * 20;
+
+ /**
+ * GNU format as per before tar 1.12.
+ */
+ int FORMAT_OLDGNU = 2;
+
+ /**
+ * Pure Posix format.
+ */
+ int FORMAT_POSIX = 3;
+
+ /**
+ * The length of the name field in a header buffer.
+ */
+ int NAMELEN = 100;
+
+ /**
+ * The length of the mode field in a header buffer.
+ */
+ int MODELEN = 8;
+
+ /**
+ * The length of the user id field in a header buffer.
+ */
+ int UIDLEN = 8;
+
+ /**
+ * The length of the group id field in a header buffer.
+ */
+ int GIDLEN = 8;
+
+ /**
+ * The maximum value of gid/uid in a tar archive which can
+ * be expressed in octal char notation (that's 7 sevens, octal).
+ */
+ long MAXID = 07777777L;
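+ // i.e. MAXID == 2097151 decimal == 2^21 - 1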
+
+ /**
+ * The length of the checksum field in a header buffer.
+ */
+ int CHKSUMLEN = 8;
+
+ /**
+ * Offset of the checksum field within header record.
+ * @since 1.5
+ */
+ int CHKSUM_OFFSET = 148;
+
+ /**
+ * The length of the size field in a header buffer.
+ * Includes the trailing space or NUL.
+ */
+ int SIZELEN = 12;
+
+ /**
+ * The maximum size of a file in a tar archive
+ * which can be expressed in octal char notation (that's 11 sevens, octal).
+ */
+ long MAXSIZE = 077777777777L;
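+ // i.e. MAXSIZE == 8589934591 decimal == 2^33 - 1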
+
+ /** Offset of start of magic field within header record */
+ int MAGIC_OFFSET = 257;
+ /**
+ * The length of the magic field in a header buffer.
+ */
+ int MAGICLEN = 6;
+
+ /** Offset of start of version field within header record */
+ int VERSION_OFFSET = 263;
+ /**
+ * Previously this was regarded as part of "magic" field, but it is separate.
+ */
+ int VERSIONLEN = 2;
+
+ /**
+ * The length of the modification time field in a header buffer.
+ */
+ int MODTIMELEN = 12;
+
+ /**
+ * The length of the user name field in a header buffer.
+ */
+ int UNAMELEN = 32;
+
+ /**
+ * The length of the group name field in a header buffer.
+ */
+ int GNAMELEN = 32;
+
+ /**
+ * The length of each of the device fields (major and minor) in a header buffer.
+ */
+ int DEVLEN = 8;
+
+ /**
+ * Length of the prefix field.
+ *
+ */
+ int PREFIXLEN = 155;
+
+ /**
+ * The length of the access time field in an old GNU header buffer.
+ *
+ */
+ int ATIMELEN_GNU = 12;
+
+ /**
+ * The length of the created time field in an old GNU header buffer.
+ *
+ */
+ int CTIMELEN_GNU = 12;
+
+ /**
+ * The length of the multivolume start offset field in an old GNU header buffer.
+ *
+ */
+ int OFFSETLEN_GNU = 12;
+
+ /**
+ * The length of the long names field in an old GNU header buffer.
+ *
+ */
+ int LONGNAMESLEN_GNU = 4;
+
+ /**
+ * The length of the padding field in an old GNU header buffer.
+ *
+ */
+ int PAD2LEN_GNU = 1;
+
+ /**
+ * The sum of the length of all sparse headers in an old GNU header buffer.
+ *
+ */
+ int SPARSELEN_GNU = 96;
+
+ /**
+ * The length of the isExtended flag field in an old GNU header buffer.
+ *
+ */
+ int ISEXTENDEDLEN_GNU = 1;
+
+ /**
+ * The length of the real size field in an old GNU header buffer.
+ *
+ */
+ int REALSIZELEN_GNU = 12;
+
+ /**
+ * The sum of the length of all sparse headers in a sparse header buffer.
+ *
+ */
+ int SPARSELEN_GNU_SPARSE = 504;
+
+ /**
+ * The length of the isExtended flag field in a sparse header buffer.
+ *
+ */
+ int ISEXTENDEDLEN_GNU_SPARSE = 1;
+
+ /**
+ * LF_ constants represent the "link flag" of an entry, or more commonly,
+ * the "entry type". This is the "old way" of indicating a normal file.
+ */
+ byte LF_OLDNORM = 0;
+
+ /**
+ * Normal file type.
+ */
+ byte LF_NORMAL = (byte) '0';
+
+ /**
+ * Link file type.
+ */
+ byte LF_LINK = (byte) '1';
+
+ /**
+ * Symbolic link file type.
+ */
+ byte LF_SYMLINK = (byte) '2';
+
+ /**
+ * Character device file type.
+ */
+ byte LF_CHR = (byte) '3';
+
+ /**
+ * Block device file type.
+ */
+ byte LF_BLK = (byte) '4';
+
+ /**
+ * Directory file type.
+ */
+ byte LF_DIR = (byte) '5';
+
+ /**
+ * FIFO (pipe) file type.
+ */
+ byte LF_FIFO = (byte) '6';
+
+ /**
+ * Contiguous file type.
+ */
+ byte LF_CONTIG = (byte) '7';
+
+ /**
+ * Identifies the *next* file on the tape as having a long linkname.
+ */
+ byte LF_GNUTYPE_LONGLINK = (byte) 'K';
+
+ /**
+ * Identifies the *next* file on the tape as having a long name.
+ */
+ byte LF_GNUTYPE_LONGNAME = (byte) 'L';
+
+ /**
+ * Sparse file type.
+ * @since 1.1.1
+ */
+ byte LF_GNUTYPE_SPARSE = (byte) 'S';
+
+ // See "http://www.opengroup.org/onlinepubs/009695399/utilities/pax.html#tag_04_100_13_02"
+
+ /**
+ * Identifies the entry as a Pax extended header.
+ * @since 1.1
+ */
+ byte LF_PAX_EXTENDED_HEADER_LC = (byte) 'x';
+
+ /**
+ * Identifies the entry as a Pax extended header (SunOS tar -E).
+ *
+ * @since 1.1
+ */
+ byte LF_PAX_EXTENDED_HEADER_UC = (byte) 'X';
+
+ /**
+ * Identifies the entry as a Pax global extended header.
+ *
+ * @since 1.1
+ */
+ byte LF_PAX_GLOBAL_EXTENDED_HEADER = (byte) 'g';
+
+ /**
+ * The magic tag representing a POSIX tar archive.
+ */
+ String MAGIC_POSIX = "ustar\0";
+ String VERSION_POSIX = "00";
+
+ /**
+ * The magic tag representing a GNU tar archive.
+ */
+ String MAGIC_GNU = "ustar ";
+ // Appear to be two possible GNU versions
+ String VERSION_GNU_SPACE = " \0";
+ String VERSION_GNU_ZERO = "0\0";
+
+ /**
+ * The magic tag representing an Ant tar archive.
+ *
+ * @since 1.1
+ */
+ String MAGIC_ANT = "ustar\0";
+
+ /**
+ * The "version" representing an Ant tar archive.
+ *
+ * @since 1.1
+ */
+ // Does not appear to have a version, however Ant does write 8 bytes,
+ // so assume the version is 2 nulls
+ String VERSION_ANT = "\0\0";
+
+ /**
+ * The name of the GNU tar entry which contains a long name.
+ */
+ String GNU_LONGLINK = "././@LongLink"; // TODO rename as LONGLINK_GNU ?
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarUtils.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarUtils.java
new file mode 100644
index 000000000..1579dcbb8
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/TarUtils.java
@@ -0,0 +1,632 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.tar;
+
+import static org.apache.commons.compress.archivers.tar.TarConstants.CHKSUMLEN;
+import static org.apache.commons.compress.archivers.tar.TarConstants.CHKSUM_OFFSET;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import org.apache.commons.compress.archivers.zip.ZipEncoding;
+import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
+
+/**
+ * This class provides static utility methods to work with byte streams.
+ *
+ * @Immutable
+ */
+// CheckStyle:HideUtilityClassConstructorCheck OFF (bc)
+public class TarUtils {
+
+ private static final int BYTE_MASK = 255;
+
+ static final ZipEncoding DEFAULT_ENCODING =
+ ZipEncodingHelper.getZipEncoding(null);
+
+ /**
+ * Encapsulates the algorithms used up to Commons Compress 1.3 as
+ * ZipEncoding.
+ */
+ static final ZipEncoding FALLBACK_ENCODING = new ZipEncoding() {
+ public boolean canEncode(String name) { return true; }
+
+ public ByteBuffer encode(String name) {
+ final int length = name.length();
+ byte[] buf = new byte[length];
+
+ // copy until end of input or output is reached.
+ for (int i = 0; i < length; ++i) {
+ buf[i] = (byte) name.charAt(i);
+ }
+ return ByteBuffer.wrap(buf);
+ }
+
+ public String decode(byte[] buffer) {
+ final int length = buffer.length;
+ StringBuilder result = new StringBuilder(length);
+
+ for (int i = 0; i < length; ++i) {
+ byte b = buffer[i];
+ if (b == 0) { // Trailing null
+ break;
+ }
+ result.append((char) (b & 0xFF)); // Allow for sign-extension
+ }
+
+ return result.toString();
+ }
+ };
+
+ /** Private constructor to prevent instantiation of this utility class. */
+ private TarUtils(){
+ }
+
+ /**
+ * Parse an octal string from a buffer.
+ *
+ * Leading spaces are ignored.
+ * The buffer must contain a trailing space or NUL,
+ * and may contain an additional trailing space or NUL.
+ *
+ * The input buffer is allowed to contain all NULs,
+ * in which case the method returns 0L
+ * (this allows for missing fields).
+ *
+ * To work-around some tar implementations that insert a
+ * leading NUL this method returns 0 if it detects a leading NUL
+ * since Commons Compress 1.4.
+ *
+ * @param buffer The buffer from which to parse.
+ * @param offset The offset into the buffer from which to parse.
+ * @param length The maximum number of bytes to parse - must be at least 2 bytes.
+ * @return The long value of the octal string.
+ * @throws IllegalArgumentException if the trailing space/NUL is missing or if an invalid byte is detected.
+ */
+ public static long parseOctal(final byte[] buffer, final int offset, final int length) {
+ long result = 0;
+ int end = offset + length;
+ int start = offset;
+
+ if (length < 2){
+ throw new IllegalArgumentException("Length "+length+" must be at least 2");
+ }
+
+ if (buffer[start] == 0) {
+ return 0L;
+ }
+
+ // Skip leading spaces
+ while (start < end){
+ if (buffer[start] == ' '){
+ start++;
+ } else {
+ break;
+ }
+ }
+
+ // Must have trailing NUL or space
+ byte trailer;
+ trailer = buffer[end-1];
+ if (trailer == 0 || trailer == ' '){
+ end--;
+ } else {
+ throw new IllegalArgumentException(
+ exceptionMessage(buffer, offset, length, end-1, trailer));
+ }
+ // May have additional NULs or spaces
+ trailer = buffer[end - 1];
+ while (start < end - 1 && (trailer == 0 || trailer == ' ')) {
+ end--;
+ trailer = buffer[end - 1];
+ }
+
+ for ( ;start < end; start++) {
+ final byte currentByte = buffer[start];
+ // CheckStyle:MagicNumber OFF
+ if (currentByte < '0' || currentByte > '7'){
+ throw new IllegalArgumentException(
+ exceptionMessage(buffer, offset, length, start, currentByte));
+ }
+ result = (result << 3) + (currentByte - '0'); // convert from ASCII
+ // CheckStyle:MagicNumber ON
+ }
+
+ return result;
+ }
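+
+ // Illustrative sketch (not upstream code): round trip with the
+ // format* methods defined below. formatLongOctalBytes pads with
+ // leading zeros and appends a space, which parseOctal accepts:
+ //
+ // byte[] field = new byte[TarConstants.SIZELEN]; // 12 bytes
+ // TarUtils.formatLongOctalBytes(0644, field, 0, field.length);
+ // long mode = TarUtils.parseOctal(field, 0, field.length); // 0644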
+
+ /**
+ * Compute the value contained in a byte buffer. If the most
+ * significant bit of the first byte in the buffer is set, this
+ * bit is ignored and the rest of the buffer is interpreted as a
+ * binary number. Otherwise, the buffer is interpreted as an
+ * octal number as per the parseOctal function above.
+ *
+ * @param buffer The buffer from which to parse.
+ * @param offset The offset into the buffer from which to parse.
+ * @param length The maximum number of bytes to parse.
+ * @return The long value of the octal or binary string.
+ * @throws IllegalArgumentException if the trailing space/NUL is
+ * missing or an invalid byte is detected in an octal number, or
+ * if a binary number would exceed the size of a signed long
+ * 64-bit integer.
+ * @since 1.4
+ */
+ public static long parseOctalOrBinary(final byte[] buffer, final int offset,
+ final int length) {
+
+ if ((buffer[offset] & 0x80) == 0) {
+ return parseOctal(buffer, offset, length);
+ }
+ final boolean negative = buffer[offset] == (byte) 0xff;
+ if (length < 9) {
+ return parseBinaryLong(buffer, offset, length, negative);
+ }
+ return parseBinaryBigInteger(buffer, offset, length, negative);
+ }
+
+ private static long parseBinaryLong(final byte[] buffer, final int offset,
+ final int length,
+ final boolean negative) {
+ if (length >= 9) {
+ throw new IllegalArgumentException("At offset " + offset + ", "
+ + length + " byte binary number"
+ + " exceeds maximum signed long"
+ + " value");
+ }
+ long val = 0;
+ for (int i = 1; i < length; i++) {
+ val = (val << 8) + (buffer[offset + i] & 0xff);
+ }
+ if (negative) {
+ // 2's complement
+ val--;
+ val ^= (long) Math.pow(2, (length - 1) * 8) - 1;
+ }
+ return negative ? -val : val;
+ }
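+
+ // Worked example: a 3 byte field { 0xff, 0xff, 0xfe } (the leading
+ // 0xff marks a negative number) gives val == 0xfffe; subtracting one
+ // (0xfffd) and xor-ing with 0xffff yields 2, so the result is -2.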
+
+ private static long parseBinaryBigInteger(final byte[] buffer,
+ final int offset,
+ final int length,
+ final boolean negative) {
+ byte[] remainder = new byte[length - 1];
+ System.arraycopy(buffer, offset + 1, remainder, 0, length - 1);
+ BigInteger val = new BigInteger(remainder);
+ if (negative) {
+ // 2's complement
+ val = val.add(BigInteger.valueOf(-1)).not();
+ }
+ if (val.bitLength() > 63) {
+ throw new IllegalArgumentException("At offset " + offset + ", "
+ + length + " byte binary number"
+ + " exceeds maximum signed long"
+ + " value");
+ }
+ return negative ? -val.longValue() : val.longValue();
+ }
+
+ /**
+ * Parse a boolean byte from a buffer.
+ * Leading spaces and NUL are ignored.
+ * The buffer may contain trailing spaces or NULs.
+ *
+ * @param buffer The buffer from which to parse.
+ * @param offset The offset into the buffer from which to parse.
+ * @return The boolean value of the bytes.
+ * @throws IllegalArgumentException if an invalid byte is detected.
+ */
+ public static boolean parseBoolean(final byte[] buffer, final int offset) {
+ return buffer[offset] == 1;
+ }
+
+ // Helper method to generate the exception message
+ private static String exceptionMessage(byte[] buffer, final int offset,
+ final int length, int current, final byte currentByte) {
+ // default charset is good enough for an exception message,
+ //
+ // the alternative was to modify parseOctal and
+ // parseOctalOrBinary to receive the ZipEncoding of the
+ // archive (deprecating the existing public methods, of
+ // course) and dealing with the fact that ZipEncoding#decode
+ // can throw an IOException which parseOctal* doesn't declare
+ String string = new String(buffer, offset, length);
+
+ string=string.replaceAll("\0", "{NUL}"); // Replace NULs to allow string to be printed
+ final String s = "Invalid byte "+currentByte+" at offset "+(current-offset)+" in '"+string+"' len="+length;
+ return s;
+ }
+
+ /**
+ * Parse an entry name from a buffer.
+ * Parsing stops when a NUL is found
+ * or the buffer length is reached.
+ *
+ * @param buffer The buffer from which to parse.
+ * @param offset The offset into the buffer from which to parse.
+ * @param length The maximum number of bytes to parse.
+ * @return The entry name.
+ */
+ public static String parseName(byte[] buffer, final int offset, final int length) {
+ try {
+ return parseName(buffer, offset, length, DEFAULT_ENCODING);
+ } catch (IOException ex) {
+ try {
+ return parseName(buffer, offset, length, FALLBACK_ENCODING);
+ } catch (IOException ex2) {
+ // impossible
+ throw new RuntimeException(ex2);
+ }
+ }
+ }
+
+ /**
+ * Parse an entry name from a buffer.
+ * Parsing stops when a NUL is found
+ * or the buffer length is reached.
+ *
+ * @param buffer The buffer from which to parse.
+ * @param offset The offset into the buffer from which to parse.
+ * @param length The maximum number of bytes to parse.
+ * @param encoding the encoding to use for file names
+ * @since 1.4
+ * @return The entry name.
+ */
+ public static String parseName(byte[] buffer, final int offset,
+ final int length,
+ final ZipEncoding encoding)
+ throws IOException {
+
+ int len = length;
+ for (; len > 0; len--) {
+ if (buffer[offset + len - 1] != 0) {
+ break;
+ }
+ }
+ if (len > 0) {
+ byte[] b = new byte[len];
+ System.arraycopy(buffer, offset, b, 0, len);
+ return encoding.decode(b);
+ }
+ return "";
+ }
+
+ /**
+ * Copy a name into a buffer.
+ * Copies characters from the name into the buffer
+ * starting at the specified offset.
+ * If the buffer is longer than the name, the buffer
+ * is filled with trailing NULs.
+ * If the name is longer than the buffer,
+ * the output is truncated.
+ *
+ * @param name The header name from which to copy the characters.
+ * @param buf The buffer where the name is to be stored.
+ * @param offset The starting offset into the buffer
+ * @param length The maximum number of header bytes to copy.
+ * @return The updated offset, i.e. offset + length
+ */
+ public static int formatNameBytes(String name, byte[] buf, final int offset, final int length) {
+ try {
+ return formatNameBytes(name, buf, offset, length, DEFAULT_ENCODING);
+ } catch (IOException ex) {
+ try {
+ return formatNameBytes(name, buf, offset, length,
+ FALLBACK_ENCODING);
+ } catch (IOException ex2) {
+ // impossible
+ throw new RuntimeException(ex2);
+ }
+ }
+ }
+
+ /**
+ * Copy a name into a buffer.
+ * Copies characters from the name into the buffer
+ * starting at the specified offset.
+ * If the buffer is longer than the name, the buffer
+ * is filled with trailing NULs.
+ * If the name is longer than the buffer,
+ * the output is truncated.
+ *
+ * @param name The header name from which to copy the characters.
+ * @param buf The buffer where the name is to be stored.
+ * @param offset The starting offset into the buffer
+ * @param length The maximum number of header bytes to copy.
+ * @param encoding the encoding to use for file names
+ * @since 1.4
+ * @return The updated offset, i.e. offset + length
+ */
+ public static int formatNameBytes(String name, byte[] buf, final int offset,
+ final int length,
+ final ZipEncoding encoding)
+ throws IOException {
+ int len = name.length();
+ ByteBuffer b = encoding.encode(name);
+ while (b.limit() > length && len > 0) {
+ b = encoding.encode(name.substring(0, --len));
+ }
+ final int limit = b.limit() - b.position();
+ System.arraycopy(b.array(), b.arrayOffset(), buf, offset, limit);
+
+ // Pad any remaining output bytes with NUL
+ for (int i = limit; i < length; ++i) {
+ buf[offset + i] = 0;
+ }
+
+ return offset + length;
+ }
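+
+ // Illustrative sketch: formatNameBytes("abc", buf, 0, 8) stores
+ // 'a', 'b', 'c' followed by five NULs and returns 8, and
+ // parseName(buf, 0, 8) recovers "abc"; a name longer than the field
+ // is silently truncated instead.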
+
+ /**
+ * Fill buffer with unsigned octal number, padded with leading zeroes.
+ *
+ * @param value number to convert to octal - treated as unsigned
+ * @param buffer destination buffer
+ * @param offset starting offset in buffer
+ * @param length length of buffer to fill
+ * @throws IllegalArgumentException if the value will not fit in the buffer
+ */
+ public static void formatUnsignedOctalString(final long value, byte[] buffer,
+ final int offset, final int length) {
+ int remaining = length;
+ remaining--;
+ if (value == 0) {
+ buffer[offset + remaining--] = (byte) '0';
+ } else {
+ long val = value;
+ for (; remaining >= 0 && val != 0; --remaining) {
+ // CheckStyle:MagicNumber OFF
+ buffer[offset + remaining] = (byte) ((byte) '0' + (byte) (val & 7));
+ val = val >>> 3;
+ // CheckStyle:MagicNumber ON
+ }
+ if (val != 0){
+ throw new IllegalArgumentException
+ (value+"="+Long.toOctalString(value)+ " will not fit in octal number buffer of length "+length);
+ }
+ }
+
+ for (; remaining >= 0; --remaining) { // leading zeros
+ buffer[offset + remaining] = (byte) '0';
+ }
+ }
+
+ /**
+ * Write an octal integer into a buffer.
+ *
+ * Uses {@link #formatUnsignedOctalString} to format
+ * the value as an octal string with leading zeros.
+ * The converted number is followed by space and NUL
+ *
+ * @param value The value to write
+ * @param buf The buffer to receive the output
+ * @param offset The starting offset into the buffer
+ * @param length The size of the output buffer
+ * @return The updated offset, i.e offset+length
+ * @throws IllegalArgumentException if the value (and trailer) will not fit in the buffer
+ */
+ public static int formatOctalBytes(final long value, byte[] buf, final int offset, final int length) {
+
+ int idx=length-2; // For space and trailing null
+ formatUnsignedOctalString(value, buf, offset, idx);
+
+ buf[offset + idx++] = (byte) ' '; // Trailing space
+ buf[offset + idx] = 0; // Trailing null
+
+ return offset + length;
+ }
+
+ /**
+ * Write an octal long integer into a buffer.
+ *
+ * Uses {@link #formatUnsignedOctalString} to format
+ * the value as an octal string with leading zeros.
+ * The converted number is followed by a space.
+ *
+ * @param value The value to write as octal
+ * @param buf The destination buffer.
+ * @param offset The starting offset into the buffer.
+ * @param length The length of the buffer
+ * @return The updated offset
+ * @throws IllegalArgumentException if the value (and trailer) will not fit in the buffer
+ */
+ public static int formatLongOctalBytes(final long value, byte[] buf, final int offset, final int length) {
+
+ int idx=length-1; // For space
+
+ formatUnsignedOctalString(value, buf, offset, idx);
+ buf[offset + idx] = (byte) ' '; // Trailing space
+
+ return offset + length;
+ }
+
+ /**
+ * Write a long integer into a buffer as an octal string if this
+ * will fit, or as a binary number otherwise.
+ *
+ * Uses {@link #formatUnsignedOctalString} to format
+ * the value as an octal string with leading zeros.
+ * The converted number is followed by a space.
+ *
+ * @param value The value to write into the buffer.
+ * @param buf The destination buffer.
+ * @param offset The starting offset into the buffer.
+ * @param length The length of the buffer.
+ * @return The updated offset.
+ * @throws IllegalArgumentException if the value (and trailer)
+ * will not fit in the buffer.
+ * @since 1.4
+ */
+ public static int formatLongOctalOrBinaryBytes(
+ final long value, byte[] buf, final int offset, final int length) {
+
+ // Check whether we are dealing with UID/GID or SIZE field
+ final long maxAsOctalChar = length == TarConstants.UIDLEN ? TarConstants.MAXID : TarConstants.MAXSIZE;
+
+ final boolean negative = value < 0;
+ if (!negative && value <= maxAsOctalChar) { // OK to store as octal chars
+ return formatLongOctalBytes(value, buf, offset, length);
+ }
+
+ if (length < 9) {
+ formatLongBinary(value, buf, offset, length, negative);
+ } else {
+ formatBigIntegerBinary(value, buf, offset, length, negative);
+ }
+
+ buf[offset] = (byte) (negative ? 0xff : 0x80);
+ return offset + length;
+ }
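+
+ // Worked example: 2^33 exceeds MAXSIZE (2^33 - 1), so a 12 byte size
+ // field is written base-256 with the marker bit set:
+ // { (byte) 0x80, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0 }
+ // parseOctalOrBinary sees the top bit of the first byte and reads
+ // the remaining 11 bytes back as a binary number.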
+
+ private static void formatLongBinary(final long value, byte[] buf,
+ final int offset, final int length,
+ final boolean negative) {
+ final int bits = (length - 1) * 8;
+ final long max = 1L << bits;
+ long val = Math.abs(value);
+ if (val >= max) {
+ throw new IllegalArgumentException("Value " + value +
+ " is too large for " + length + " byte field.");
+ }
+ if (negative) {
+ val ^= max - 1;
+ val |= 0xffL << bits; // long literal: bits may be >= 32
+ val++;
+ }
+ for (int i = offset + length - 1; i >= offset; i--) {
+ buf[i] = (byte) val;
+ val >>= 8;
+ }
+ }
+
+ private static void formatBigIntegerBinary(final long value, byte[] buf,
+ final int offset,
+ final int length,
+ final boolean negative) {
+ BigInteger val = BigInteger.valueOf(value);
+ final byte[] b = val.toByteArray();
+ final int len = b.length;
+ final int off = offset + length - len;
+ System.arraycopy(b, 0, buf, off, len);
+ final byte fill = (byte) (negative ? 0xff : 0);
+ for (int i = offset + 1; i < off; i++) {
+ buf[i] = fill;
+ }
+ }
+
+ /**
+ * Writes an octal value into a buffer.
+ *
+ * Uses {@link #formatUnsignedOctalString} to format
+ * the value as an octal string with leading zeros.
+ * The converted number is followed by NUL and then space.
+ *
+ * @param value The value to convert
+ * @param buf The destination buffer
+ * @param offset The starting offset into the buffer.
+ * @param length The size of the buffer.
+ * @return The updated value of offset, i.e. offset+length
+ * @throws IllegalArgumentException if the value (and trailer) will not fit in the buffer
+ */
+ public static int formatCheckSumOctalBytes(final long value, byte[] buf, final int offset, final int length) {
+
+ int idx=length-2; // for NUL and space
+ formatUnsignedOctalString(value, buf, offset, idx);
+
+ buf[offset + idx++] = 0; // Trailing null
+ buf[offset + idx] = (byte) ' '; // Trailing space
+
+ return offset + length;
+ }
+
+ /**
+ * Compute the checksum of a tar entry header.
+ *
+ * @param buf The tar entry's header buffer.
+ * @return The computed checksum.
+ */
+ public static long computeCheckSum(final byte[] buf) {
+ long sum = 0;
+
+ for (byte element : buf) {
+ sum += BYTE_MASK & element;
+ }
+
+ return sum;
+ }
+
+ /**
+ * Wikipedia says:
+ *
+ * The checksum is calculated by taking the sum of the unsigned byte values
+ * of the header block with the eight checksum bytes taken to be ascii
+ * spaces (decimal value 32). It is stored as a six digit octal number with
+ * leading zeroes followed by a NUL and then a space. Various
+ * implementations do not adhere to this format. For better compatibility,
+ * ignore leading and trailing whitespace, and get the first six digits. In
+ * addition, some historic tar implementations treated bytes as signed.
+ * Implementations typically calculate the checksum both ways, and treat it
+ * as good if either the signed or unsigned sum matches the included
+ * checksum.
+ *
+ *
+ * In addition there are
+ * some tar files
+ * that seem to have parts of their header cleared to zero (no detectable
+ * magic bytes, etc.) but still have a reasonable-looking checksum field
+ * present. It looks like we can detect such cases reasonably well by
+ * checking whether the stored checksum is greater than the
+ * computed unsigned checksum. That check is unlikely to pass on some
+ * random file header, as it would need to have a valid sequence of
+ * octal digits in just the right place.
+ *
+ * The return value of this method should be treated as a best-effort
+ * heuristic rather than an absolute and final truth. The checksum
+ * verification logic may well evolve over time as more special cases
+ * are encountered.
+ *
+ * @param header tar header
+ * @return whether the checksum is reasonably good
+ * @see COMPRESS-191
+ * @since 1.5
+ */
+ public static boolean verifyCheckSum(byte[] header) {
+ long storedSum = 0;
+ long unsignedSum = 0;
+ long signedSum = 0;
+
+ int digits = 0;
+ for (int i = 0; i < header.length; i++) {
+ byte b = header[i];
+ if (CHKSUM_OFFSET <= i && i < CHKSUM_OFFSET + CHKSUMLEN) {
+ if ('0' <= b && b <= '7' && digits++ < 6) {
+ storedSum = storedSum * 8 + b - '0';
+ } else if (digits > 0) {
+ digits = 6; // only look at the first octal digit sequence
+ }
+ b = ' ';
+ }
+ unsignedSum += 0xff & b;
+ signedSum += b;
+ }
+
+ return storedSum == unsignedSum || storedSum == signedSum
+ || storedSum > unsignedSum; // COMPRESS-177
+ }
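+
+ // Illustrative sketch of how writers produce the field this method
+ // verifies: blank out the 8 checksum bytes with ASCII spaces, sum
+ // the unsigned bytes, then store six octal digits plus NUL and
+ // space (header is a hypothetical 512 byte record):
+ //
+ // java.util.Arrays.fill(header, CHKSUM_OFFSET,
+ // CHKSUM_OFFSET + CHKSUMLEN, (byte) ' ');
+ // long sum = computeCheckSum(header);
+ // formatCheckSumOctalBytes(sum, header, CHKSUM_OFFSET, CHKSUMLEN);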
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/package.html
new file mode 100644
index 000000000..141f33b61
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/tar/package.html
@@ -0,0 +1,30 @@
+
+
+
+ Provides stream classes for reading and writing archives using
+ the TAR format.
+
+ There are many different format dialects that call themselves
+ TAR. The classes of this package can read and write archives in
+ the traditional pre-POSIX ustar format and support GNU
+ specific extensions for long filenames that GNU tar itself by
+ now refers to as oldgnu.
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/AbstractUnicodeExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/AbstractUnicodeExtraField.java
new file mode 100644
index 000000000..a0a76f114
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/AbstractUnicodeExtraField.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.UnsupportedEncodingException;
+import java.util.zip.CRC32;
+import java.util.zip.ZipException;
+
+import org.apache.commons.compress.utils.CharsetNames;
+
+/**
+ * A common base class for Unicode extra information extra fields.
+ * @NotThreadSafe
+ */
+public abstract class AbstractUnicodeExtraField implements ZipExtraField {
+ private long nameCRC32;
+ private byte[] unicodeName;
+ private byte[] data;
+
+ protected AbstractUnicodeExtraField() {
+ }
+
+ /**
+ * Assemble a unicode extension from the name/comment and
+ * encoding of the original zip entry.
+ *
+ * @param text The file name or comment.
+ * @param bytes The encoded bytes of the filename or comment in
+ * the zip file.
+ * @param off The offset of the encoded filename or comment in
+ * bytes.
+ * @param len The length of the encoded filename or comment in
+ * bytes.
+ */
+ protected AbstractUnicodeExtraField(String text, byte[] bytes, int off, int len) {
+ CRC32 crc32 = new CRC32();
+ crc32.update(bytes, off, len);
+ nameCRC32 = crc32.getValue();
+
+ try {
+ unicodeName = text.getBytes(CharsetNames.UTF_8);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException("FATAL: UTF-8 encoding not supported.", e);
+ }
+ }
+
+ /**
+ * Assemble a unicode extension from the name/comment and
+ * encoding of the original zip entry.
+ *
+ * @param text The file name or comment.
+ * @param bytes The encoded bytes of the filename or comment in
+ * the zip file.
+ */
+ protected AbstractUnicodeExtraField(String text, byte[] bytes) {
+ this(text, bytes, 0, bytes.length);
+ }
+
+ private void assembleData() {
+ if (unicodeName == null) {
+ return;
+ }
+
+ data = new byte[5 + unicodeName.length];
+ // version 1
+ data[0] = 0x01;
+ System.arraycopy(ZipLong.getBytes(nameCRC32), 0, data, 1, 4);
+ System.arraycopy(unicodeName, 0, data, 5, unicodeName.length);
+ }
+
+ /**
+ * @return The CRC32 checksum of the filename or comment as
+ * encoded in the central directory of the zip file.
+ */
+ public long getNameCRC32() {
+ return nameCRC32;
+ }
+
+ /**
+ * @param nameCRC32 The CRC32 checksum of the filename as encoded
+ * in the central directory of the zip file to set.
+ */
+ public void setNameCRC32(long nameCRC32) {
+ this.nameCRC32 = nameCRC32;
+ data = null;
+ }
+
+ /**
+ * @return The UTF-8 encoded name.
+ */
+ public byte[] getUnicodeName() {
+ byte[] b = null;
+ if (unicodeName != null) {
+ b = new byte[unicodeName.length];
+ System.arraycopy(unicodeName, 0, b, 0, b.length);
+ }
+ return b;
+ }
+
+ /**
+ * @param unicodeName The UTF-8 encoded name to set.
+ */
+ public void setUnicodeName(byte[] unicodeName) {
+ if (unicodeName != null) {
+ this.unicodeName = new byte[unicodeName.length];
+ System.arraycopy(unicodeName, 0, this.unicodeName, 0,
+ unicodeName.length);
+ } else {
+ this.unicodeName = null;
+ }
+ data = null;
+ }
+
+ public byte[] getCentralDirectoryData() {
+ if (data == null) {
+ this.assembleData();
+ }
+ byte[] b = null;
+ if (data != null) {
+ b = new byte[data.length];
+ System.arraycopy(data, 0, b, 0, b.length);
+ }
+ return b;
+ }
+
+ public ZipShort getCentralDirectoryLength() {
+ if (data == null) {
+ assembleData();
+ }
+ return new ZipShort(data.length);
+ }
+
+ public byte[] getLocalFileDataData() {
+ return getCentralDirectoryData();
+ }
+
+ public ZipShort getLocalFileDataLength() {
+ return getCentralDirectoryLength();
+ }
+
+ public void parseFromLocalFileData(byte[] buffer, int offset, int length)
+ throws ZipException {
+
+ if (length < 5) {
+ throw new ZipException("UniCode path extra data must have at least 5 bytes.");
+ }
+
+ int version = buffer[offset];
+
+ if (version != 0x01) {
+ throw new ZipException("Unsupported version [" + version
+ + "] for UniCode path extra data.");
+ }
+
+ nameCRC32 = ZipLong.getValue(buffer, offset + 1);
+ unicodeName = new byte[length - 5];
+ System.arraycopy(buffer, offset + 5, unicodeName, 0, length - 5);
+ data = null;
+ }
+
+ /**
+ * Doesn't do anything special since this class always uses the
+ * same data in central directory and local file data.
+ */
+ public void parseFromCentralDirectoryData(byte[] buffer, int offset,
+ int length)
+ throws ZipException {
+ parseFromLocalFileData(buffer, offset, length);
+ }
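+
+ // Illustrative sketch: a concrete subclass such as
+ // UnicodePathExtraField pairs the UTF-8 name with the CRC32 of the
+ // bytes as originally encoded, so a reader can check whether the
+ // extra field still matches the (possibly re-encoded) entry name:
+ //
+ // byte[] raw = name.getBytes("ISO-8859-1"); // encoding is an example
+ // UnicodePathExtraField f =
+ // new UnicodePathExtraField(name, raw, 0, raw.length);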
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/AsiExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/AsiExtraField.java
new file mode 100644
index 000000000..a2dc1c3b3
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/AsiExtraField.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.zip.CRC32;
+import java.util.zip.ZipException;
+
+/**
+ * Adds Unix file permission and UID/GID fields as well as symbolic
+ * link handling.
+ *
+ * This class uses the ASi extra field in the format:
+ *
+ * Value Size Description
+ * ----- ---- -----------
+ * (Unix3) 0x756e Short tag for this extra block type
+ * TSize Short total data size for this block
+ * CRC Long CRC-32 of the remaining data
+ * Mode Short file permissions
+ * SizDev Long symlink'd size OR major/minor dev num
+ * UID Short user ID
+ * GID Short group ID
+ * (var.) variable symbolic link filename
+ *
+ * taken from appnote.iz (Info-ZIP note, 981119) found at ftp://ftp.uu.net/pub/archiving/zip/doc/
+ *
+ * Short is two bytes and Long is four bytes in big endian byte and
+ * word order, device numbers are currently not supported.
+ * @NotThreadSafe
+ *
+ * Since the documentation this class is based upon doesn't mention
+ * the character encoding of the file name at all, it is assumed that
+ * it uses the current platform's default encoding.
+ */
+public class AsiExtraField implements ZipExtraField, UnixStat, Cloneable {
+
+ private static final ZipShort HEADER_ID = new ZipShort(0x756E);
+ private static final int WORD = 4;
+ /**
+ * Standard Unix stat(2) file mode.
+ */
+ private int mode = 0;
+ /**
+ * User ID.
+ */
+ private int uid = 0;
+ /**
+ * Group ID.
+ */
+ private int gid = 0;
+ /**
+ * File this entry points to, if it is a symbolic link.
+ *
+ * empty string - if entry is not a symbolic link.
+ */
+ private String link = "";
+ /**
+ * Is this an entry for a directory?
+ */
+ private boolean dirFlag = false;
+
+ /**
+ * Instance used to calculate checksums.
+ */
+ private CRC32 crc = new CRC32();
+
+ /** Constructor for AsiExtraField. */
+ public AsiExtraField() {
+ }
+
+ /**
+ * The Header-ID.
+ * @return the value for the header id for this extrafield
+ */
+ public ZipShort getHeaderId() {
+ return HEADER_ID;
+ }
+
+ /**
+ * Length of the extra field in the local file data - without
+ * Header-ID or length specifier.
+ * @return a ZipShort for the length of the data of this extra field
+ */
+ public ZipShort getLocalFileDataLength() {
+ return new ZipShort(WORD // CRC
+ + 2 // Mode
+ + WORD // SizDev
+ + 2 // UID
+ + 2 // GID
+ + getLinkedFile().getBytes().length);
+ // Uses default charset - see class Javadoc
+ }
+
+ /**
+ * Delegate to local file data.
+ * @return the centralDirectory length
+ */
+ public ZipShort getCentralDirectoryLength() {
+ return getLocalFileDataLength();
+ }
+
+ /**
+ * The actual data to put into local file data - without Header-ID
+ * or length specifier.
+ * @return get the data
+ */
+ public byte[] getLocalFileDataData() {
+ // CRC will be added later
+ byte[] data = new byte[getLocalFileDataLength().getValue() - WORD];
+ System.arraycopy(ZipShort.getBytes(getMode()), 0, data, 0, 2);
+
+ byte[] linkArray = getLinkedFile().getBytes(); // Uses default charset - see class Javadoc
+ // CheckStyle:MagicNumber OFF
+ System.arraycopy(ZipLong.getBytes(linkArray.length),
+ 0, data, 2, WORD);
+
+ System.arraycopy(ZipShort.getBytes(getUserId()),
+ 0, data, 6, 2);
+ System.arraycopy(ZipShort.getBytes(getGroupId()),
+ 0, data, 8, 2);
+
+ System.arraycopy(linkArray, 0, data, 10, linkArray.length);
+ // CheckStyle:MagicNumber ON
+
+ crc.reset();
+ crc.update(data);
+ long checksum = crc.getValue();
+
+ byte[] result = new byte[data.length + WORD];
+ System.arraycopy(ZipLong.getBytes(checksum), 0, result, 0, WORD);
+ System.arraycopy(data, 0, result, WORD, data.length);
+ return result;
+ }
+
+ /**
+ * Delegate to local file data.
+ * @return the local file data
+ */
+ public byte[] getCentralDirectoryData() {
+ return getLocalFileDataData();
+ }
+
+ /**
+ * Set the user id.
+ * @param uid the user id
+ */
+ public void setUserId(int uid) {
+ this.uid = uid;
+ }
+
+ /**
+ * Get the user id.
+ * @return the user id
+ */
+ public int getUserId() {
+ return uid;
+ }
+
+ /**
+ * Set the group id.
+ * @param gid the group id
+ */
+ public void setGroupId(int gid) {
+ this.gid = gid;
+ }
+
+ /**
+ * Get the group id.
+ * @return the group id
+ */
+ public int getGroupId() {
+ return gid;
+ }
+
+ /**
+ * Indicate that this entry is a symbolic link to the given filename.
+ *
+ * @param name Name of the file this entry links to, empty String
+ * if it is not a symbolic link.
+ */
+ public void setLinkedFile(String name) {
+ link = name;
+ mode = getMode(mode);
+ }
+
+ /**
+ * Name of linked file
+ *
+ * @return name of the file this entry links to if it is a
+ * symbolic link, the empty string otherwise.
+ */
+ public String getLinkedFile() {
+ return link;
+ }
+
+ /**
+ * Is this entry a symbolic link?
+ * @return true if this is a symbolic link
+ */
+ public boolean isLink() {
+ return getLinkedFile().length() != 0;
+ }
+
+ /**
+ * File mode of this file.
+ * @param mode the file mode
+ */
+ public void setMode(int mode) {
+ this.mode = getMode(mode);
+ }
+
+ /**
+ * File mode of this file.
+ * @return the file mode
+ */
+ public int getMode() {
+ return mode;
+ }
+
+ /**
+ * Indicate whether this entry is a directory.
+ * @param dirFlag if true, this entry is a directory
+ */
+ public void setDirectory(boolean dirFlag) {
+ this.dirFlag = dirFlag;
+ mode = getMode(mode);
+ }
+
+ /**
+ * Is this entry a directory?
+ * @return true if this entry is a directory
+ */
+ public boolean isDirectory() {
+ return dirFlag && !isLink();
+ }
+
+ /**
+ * Populate data from this array as if it was in local file data.
+ * @param data an array of bytes
+ * @param offset the start offset
+ * @param length the number of bytes in the array from offset
+ * @throws ZipException on error
+ */
+ public void parseFromLocalFileData(byte[] data, int offset, int length)
+ throws ZipException {
+
+ long givenChecksum = ZipLong.getValue(data, offset);
+ byte[] tmp = new byte[length - WORD];
+ System.arraycopy(data, offset + WORD, tmp, 0, length - WORD);
+ crc.reset();
+ crc.update(tmp);
+ long realChecksum = crc.getValue();
+ if (givenChecksum != realChecksum) {
+ throw new ZipException("bad CRC checksum "
+ + Long.toHexString(givenChecksum)
+ + " instead of "
+ + Long.toHexString(realChecksum));
+ }
+
+ int newMode = ZipShort.getValue(tmp, 0);
+ // CheckStyle:MagicNumber OFF
+ byte[] linkArray = new byte[(int) ZipLong.getValue(tmp, 2)];
+ uid = ZipShort.getValue(tmp, 6);
+ gid = ZipShort.getValue(tmp, 8);
+
+ if (linkArray.length == 0) {
+ link = "";
+ } else {
+ System.arraycopy(tmp, 10, linkArray, 0, linkArray.length);
+ link = new String(linkArray); // Uses default charset - see class Javadoc
+ }
+ // CheckStyle:MagicNumber ON
+ setDirectory((newMode & DIR_FLAG) != 0);
+ setMode(newMode);
+ }
+
+ /**
+ * Doesn't do anything special since this class always uses the
+ * same data in central directory and local file data.
+ */
+ public void parseFromCentralDirectoryData(byte[] buffer, int offset,
+ int length)
+ throws ZipException {
+ parseFromLocalFileData(buffer, offset, length);
+ }
+
+ /**
+ * Get the file mode for given permissions with the correct file type.
+ * @param mode the mode
+ * @return the type with the mode
+ */
+ protected int getMode(int mode) {
+ int type = FILE_FLAG;
+ if (isLink()) {
+ type = LINK_FLAG;
+ } else if (isDirectory()) {
+ type = DIR_FLAG;
+ }
+ return type | (mode & PERM_MASK);
+ }
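+
+ // Illustrative sketch: the type bits always reflect the entry state,
+ // so after
+ //
+ // AsiExtraField f = new AsiExtraField();
+ // f.setLinkedFile("target");
+ // f.setMode(0755);
+ //
+ // getMode() returns LINK_FLAG | 0755 rather than the raw 0755.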
+
+ @Override
+ public Object clone() {
+ try {
+ AsiExtraField cloned = (AsiExtraField) super.clone();
+ cloned.crc = new CRC32();
+ return cloned;
+ } catch (CloneNotSupportedException cnfe) {
+ // impossible
+ throw new RuntimeException(cnfe);
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/BinaryTree.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/BinaryTree.java
new file mode 100644
index 000000000..a4ac4b5b7
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/BinaryTree.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+/**
+ * Binary tree of positive values.
+ *
+ * @author Emmanuel Bourg
+ * @since 1.7
+ */
+class BinaryTree {
+
+ /** Value in the array indicating an undefined node */
+ private static final int UNDEFINED = -1;
+
+ /** Value in the array indicating a non leaf node */
+ private static final int NODE = -2;
+
+ /**
+ * The array representing the binary tree. The root is at index 0,
+ * the left children are at 2*i+1 and the right children at 2*i+2.
+ */
+ private final int[] tree;
+
+ public BinaryTree(int depth) {
+ tree = new int[(1 << (depth + 1)) - 1];
+ Arrays.fill(tree, UNDEFINED);
+ }
+
+ /**
+ * Adds a leaf to the tree.
+ *
+ * @param node the index of the node where the path is appended
+ * @param path the path to the leaf (bits are parsed from the right to the left)
+ * @param depth the number of nodes in the path
+ * @param value the value of the leaf (must be positive)
+ */
+ public void addLeaf(int node, int path, int depth, int value) {
+ if (depth == 0) {
+ // end of the path reached, add the value to the current node
+ if (tree[node] == UNDEFINED) {
+ tree[node] = value;
+ } else {
+ throw new IllegalArgumentException("Tree value at index " + node + " has already been assigned (" + tree[node] + ")");
+ }
+ } else {
+ // mark the current node as a non leaf node
+ tree[node] = NODE;
+
+ // move down the path recursively
+ int nextChild = 2 * node + 1 + (path & 1);
+ addLeaf(nextChild, path >>> 1, depth - 1, value);
+ }
+ }
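+
+ // Worked example: addLeaf(0, 0b10, 2, 5) consumes the low bit first,
+ // so it walks root (0) -> left child (1) -> right child (4) and
+ // stores the value 5 at tree[4].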
+
+ /**
+ * Reads a value from the specified bit stream.
+ *
+ * @param stream
+ * @return the value decoded, or -1 if the end of the stream is reached
+ */
+ public int read(BitStream stream) throws IOException {
+ int currentIndex = 0;
+
+ while (true) {
+ int bit = stream.nextBit();
+ if (bit == -1) {
+ return -1;
+ }
+
+ int childIndex = 2 * currentIndex + 1 + bit;
+ int value = tree[childIndex];
+ if (value == NODE) {
+ // consume the next bit
+ currentIndex = childIndex;
+ } else if (value != UNDEFINED) {
+ return value;
+ } else {
+ throw new IOException("The child " + bit + " of node at index " + currentIndex + " is not defined");
+ }
+ }
+ }
+
+
+ /**
+ * Decodes the packed binary tree from the specified stream.
+ */
+ static BinaryTree decode(InputStream in, final int totalNumberOfValues) throws IOException {
+ // the first byte contains the size of the structure minus one
+ int size = in.read() + 1;
+ if (size == 0) {
+ throw new IOException("Cannot read the size of the encoded tree, unexpected end of stream");
+ }
+
+ byte[] encodedTree = new byte[size];
+ new DataInputStream(in).readFully(encodedTree);
+
+ /** The maximum bit length for a value (16 or lower) */
+ int maxLength = 0;
+
+ int[] originalBitLengths = new int[totalNumberOfValues];
+ int pos = 0;
+ for (byte b : encodedTree) {
+ // each byte encodes the number of values (upper 4 bits) for a bit length (lower 4 bits)
+ int numberOfValues = ((b & 0xF0) >> 4) + 1;
+ int bitLength = (b & 0x0F) + 1;
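+ // e.g. 0x2F encodes (0x2 + 1) = 3 values of bit length (0xF + 1) = 16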
+
+ for (int j = 0; j < numberOfValues; j++) {
+ originalBitLengths[pos++] = bitLength;
+ }
+
+ maxLength = Math.max(maxLength, bitLength);
+ }
+
+ // sort the array of bit lengths and memorize the permutation used to restore the order of the codes
+ int[] permutation = new int[originalBitLengths.length];
+ for (int k = 0; k < permutation.length; k++) {
+ permutation[k] = k;
+ }
+
+ int c = 0;
+ int[] sortedBitLengths = new int[originalBitLengths.length];
+ for (int k = 0; k < originalBitLengths.length; k++) {
+ // iterate over the values
+ for (int l = 0; l < originalBitLengths.length; l++) {
+ // look for the value in the original array
+ if (originalBitLengths[l] == k) {
+ // put the value at the current position in the sorted array...
+ sortedBitLengths[c] = k;
+
+ // ...and memorize the permutation
+ permutation[c] = l;
+
+ c++;
+ }
+ }
+ }
+
+ // decode the values of the tree
+ int code = 0;
+ int codeIncrement = 0;
+ int lastBitLength = 0;
+
+ int[] codes = new int[totalNumberOfValues];
+
+ for (int i = totalNumberOfValues - 1; i >= 0; i--) {
+ code = code + codeIncrement;
+ if (sortedBitLengths[i] != lastBitLength) {
+ lastBitLength = sortedBitLengths[i];
+ codeIncrement = 1 << (16 - lastBitLength);
+ }
+ codes[permutation[i]] = code;
+ }
+
+ // build the tree
+ BinaryTree tree = new BinaryTree(maxLength);
+
+ for (int k = 0; k < codes.length; k++) {
+ int bitLength = originalBitLengths[k];
+ if (bitLength > 0) {
+ tree.addLeaf(0, Integer.reverse(codes[k] << 16), bitLength, k);
+ }
+ }
+
+ return tree;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/BitStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/BitStream.java
new file mode 100644
index 000000000..c9e6ccf74
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/BitStream.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Iterates over the bits of an InputStream. For each byte the bits
+ * are read from the right to the left.
+ *
+ * @since 1.7
+ */
+class BitStream {
+
+ private final InputStream in;
+
+ /** The bits read from the underlying stream but not consumed by nextBits() */
+ private long bitCache;
+
+ /** The number of bits available in the bit cache */
+ private int bitCacheSize;
+
+ /** Bit masks for extracting the right most bits from a byte */
+ private static final int[] MASKS = new int[]{
+ 0x00, // 00000000
+ 0x01, // 00000001
+ 0x03, // 00000011
+ 0x07, // 00000111
+ 0x0F, // 00001111
+ 0x1F, // 00011111
+ 0x3F, // 00111111
+ 0x7F, // 01111111
+ 0xFF // 11111111
+ };
+
+ BitStream(InputStream in) {
+ this.in = in;
+ }
+
+ private boolean fillCache() throws IOException {
+ boolean filled = false;
+
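+ // 56 leaves room for at least one more whole byte in the 64 bit cache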
+ while (bitCacheSize <= 56) {
+ long nextByte = in.read();
+ if (nextByte == -1) {
+ break;
+ }
+
+ filled = true;
+ bitCache = bitCache | (nextByte << bitCacheSize);
+ bitCacheSize += 8;
+ }
+
+ return filled;
+ }
+
+ /**
+ * Returns the next bit.
+ *
+ * @return The next bit (0 or 1) or -1 if the end of the stream has been reached
+ */
+ int nextBit() throws IOException {
+ if (bitCacheSize == 0 && !fillCache()) {
+ return -1;
+ }
+
+ int bit = (int) (bitCache & 1); // extract the right most bit
+
+ bitCache = (bitCache >>> 1); // shift the remaining bits to the right
+ bitCacheSize--;
+
+ return bit;
+ }
+
+ /**
+ * Returns the integer value formed by the n next bits (up to 8 bits).
+ *
+ * @param n the number of bits read (up to 8)
+ * @return The value formed by the n bits, or -1 if the end of the stream has been reached
+ */
+ int nextBits(final int n) throws IOException {
+ if (bitCacheSize < n && !fillCache()) {
+ return -1;
+ }
+
+ final int bits = (int) (bitCache & MASKS[n]); // extract the right most bits
+
+ bitCache = (bitCache >>> n); // shift the remaining bits to the right
+ bitCacheSize = bitCacheSize - n;
+
+ return bits;
+ }
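+
+ // Worked example: with 0b10110100 in the cache, nextBits(3) masks the
+ // low bits to get 0b100 (4) and shifts, leaving 0b10110 cached.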
+
+ int nextByte() throws IOException {
+ return nextBits(8);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/CircularBuffer.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/CircularBuffer.java
new file mode 100644
index 000000000..af64a8574
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/CircularBuffer.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Circular byte buffer.
+ *
+ * @author Emmanuel Bourg
+ * @since 1.7
+ */
+class CircularBuffer {
+
+ /** Size of the buffer */
+ private final int size;
+
+ /** The buffer */
+ private final byte[] buffer;
+
+ /** Index of the next data to be read from the buffer */
+ private int readIndex;
+
+ /** Index of the next data written in the buffer */
+ private int writeIndex;
+
+ CircularBuffer(int size) {
+ this.size = size;
+ buffer = new byte[size];
+ }
+
+ /**
+ * Tells if a new byte can be read from the buffer.
+ */
+ public boolean available() {
+ return readIndex != writeIndex;
+ }
+
+ /**
+ * Writes a byte to the buffer.
+ */
+ public void put(int value) {
+ buffer[writeIndex] = (byte) value;
+ writeIndex = (writeIndex + 1) % size;
+ }
+
+ /**
+ * Reads a byte from the buffer.
+ */
+ public int get() {
+ if (available()) {
+ int value = buffer[readIndex];
+ readIndex = (readIndex + 1) % size;
+ return value & 0xFF;
+ } else {
+ return -1;
+ }
+ }
+
+ /**
+ * Copy a previous interval in the buffer to the current position.
+ *
+ * @param distance the distance from the current write position
+ * @param length the number of bytes to copy
+ */
+ public void copy(int distance, int length) {
+ int pos1 = writeIndex - distance;
+ int pos2 = pos1 + length;
+ for (int i = pos1; i < pos2; i++) {
+ buffer[writeIndex] = buffer[(i + size) % size];
+ writeIndex = (writeIndex + 1) % size;
+ }
+ }
+}
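A detail worth noting: copy() still works when length exceeds distance, because bytes written at the start of the copy immediately become sources for its tail. That self-overlap is what lets LZ77-style decoders expand long runs from short history. A sketch of that behaviour (assumes same-package access, since the class is package-private):

// Hypothetical usage from within the same package.
CircularBuffer buf = new CircularBuffer(16);
buf.put('a');
buf.put('b');

// back reference: distance 2, length 6 -- the copy overlaps itself
buf.copy(2, 6);

StringBuilder sb = new StringBuilder();
while (buf.available()) {
    sb.append((char) buf.get());
}
System.out.println(sb); // "abababab"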
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ExplodingInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ExplodingInputStream.java
new file mode 100644
index 000000000..aa9a5ce39
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ExplodingInputStream.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * The implode compression method was added to PKZIP 1.01 released in 1989.
+ * It was then dropped from PKZIP 2.0 released in 1993 in favor of the deflate
+ * method.
+ *
+ * The algorithm is described in the ZIP File Format Specification.
+ *
+ * @see <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">ZIP File Format Specification</a>
+ *
+ * @author Emmanuel Bourg
+ * @since 1.7
+ */
+class ExplodingInputStream extends InputStream {
+
+ /** The underlying stream containing the compressed data */
+ private final InputStream in;
+
+ /** The stream of bits read from the input stream */
+ private BitStream bits;
+
+ /** The size of the sliding dictionary (4K or 8K) */
+ private final int dictionarySize;
+
+ /** The number of Shannon-Fano trees (2 or 3) */
+ private final int numberOfTrees;
+
+ private final int minimumMatchLength;
+
+ /** The binary tree containing the 256 encoded literals (null when only two trees are used) */
+ private BinaryTree literalTree;
+
+ /** The binary tree containing the 64 encoded lengths */
+ private BinaryTree lengthTree;
+
+ /** The binary tree containing the 64 encoded distances */
+ private BinaryTree distanceTree;
+
+ /** Output buffer holding the decompressed data */
+ private final CircularBuffer buffer = new CircularBuffer(32 * 1024);
+
+ /**
+ * Create a new stream decompressing the content of the specified stream
+ * using the explode algorithm.
+ *
+ * @param dictionarySize the size of the sliding dictionary (4096 or 8192)
+ * @param numberOfTrees the number of trees (2 or 3)
+ * @param in the compressed data stream
+ */
+ public ExplodingInputStream(int dictionarySize, int numberOfTrees, InputStream in) {
+ if (dictionarySize != 4096 && dictionarySize != 8192) {
+ throw new IllegalArgumentException("The dictionary size must be 4096 or 8192");
+ }
+ if (numberOfTrees != 2 && numberOfTrees != 3) {
+ throw new IllegalArgumentException("The number of trees must be 2 or 3");
+ }
+ this.dictionarySize = dictionarySize;
+ this.numberOfTrees = numberOfTrees;
+ this.minimumMatchLength = numberOfTrees;
+ this.in = in;
+ }
+
+ /**
+ * Reads the encoded binary trees and prepares the bit stream.
+ *
+ * @throws IOException
+ */
+ private void init() throws IOException {
+ if (bits == null) {
+ if (numberOfTrees == 3) {
+ literalTree = BinaryTree.decode(in, 256);
+ }
+
+ lengthTree = BinaryTree.decode(in, 64);
+ distanceTree = BinaryTree.decode(in, 64);
+
+ bits = new BitStream(in);
+ }
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (!buffer.available()) {
+ fillBuffer();
+ }
+
+ return buffer.get();
+ }
+
+ /**
+ * Fill the sliding dictionary with more data.
+ * @throws IOException
+ */
+ private void fillBuffer() throws IOException {
+ init();
+
+ int bit = bits.nextBit();
+ if (bit == 1) {
+ // literal value
+ int literal;
+ if (literalTree != null) {
+ literal = literalTree.read(bits);
+ } else {
+ literal = bits.nextBits(8);
+ }
+
+ if (literal == -1) {
+ // end of stream reached, nothing left to decode
+ return;
+ }
+
+ buffer.put(literal);
+
+ } else if (bit == 0) {
+ // back reference
+ int distanceLowSize = dictionarySize == 4096 ? 6 : 7;
+ int distanceLow = bits.nextBits(distanceLowSize);
+ int distanceHigh = distanceTree.read(bits);
+ if (distanceHigh == -1 && distanceLow <= 0) {
+ // end of stream reached, nothing left to decode
+ return;
+ }
+ int distance = distanceHigh << distanceLowSize | distanceLow;
+
+ int length = lengthTree.read(bits);
+ if (length == 63) {
+ length += bits.nextBits(8);
+ }
+ length += minimumMatchLength;
+
+ buffer.copy(distance + 1, length);
+ }
+ }
+
+}
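To make the back-reference arithmetic in fillBuffer() concrete: with a 4096-byte dictionary the low part of the distance is 6 raw bits, the tree-decoded high part is shifted above it, and a length code of 63 pulls in 8 more raw bits. A worked example with illustrative (not spec-mandated) values:

public class ImplodeBackRefDemo {
    public static void main(String[] args) {
        int distanceLowSize = 6;      // 4096-byte dictionary
        int distanceLow = 0b000101;   // 6 raw bits from the stream: 5
        int distanceHigh = 2;         // value decoded from the distance tree
        int distance = distanceHigh << distanceLowSize | distanceLow;

        int length = 63 + 17;         // length code 63 adds the next 8 raw bits
        length += 3;                  // plus minimumMatchLength (3 trees)

        System.out.println(distance); // 133 -> the copy starts 134 bytes back
        System.out.println(length);   // 83 bytes are replicated
    }
}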
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ExtraFieldUtils.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ExtraFieldUtils.java
new file mode 100644
index 000000000..b2fa1dde7
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ExtraFieldUtils.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.zip.ZipException;
+
+/**
+ * ZipExtraField related methods
+ * @NotThreadSafe because the HashMap is not synch.
+ */
+// CheckStyle:HideUtilityClassConstructorCheck OFF (bc)
+public class ExtraFieldUtils {
+
+ private static final int WORD = 4;
+
+ /**
+ * Static registry of known extra fields.
+ */
+ private static final Map<ZipShort, Class<?>> implementations;
+
+ static {
+ implementations = new ConcurrentHashMap<ZipShort, Class<?>>();
+ register(AsiExtraField.class);
+ register(X5455_ExtendedTimestamp.class);
+ register(X7875_NewUnix.class);
+ register(JarMarker.class);
+ register(UnicodePathExtraField.class);
+ register(UnicodeCommentExtraField.class);
+ register(Zip64ExtendedInformationExtraField.class);
+ }
+
+ /**
+ * Register a ZipExtraField implementation.
+ *
+ * The given class must have a no-arg constructor and implement
+ * the {@link ZipExtraField ZipExtraField interface}.
+ * @param c the class to register
+ */
+ public static void register(Class<?> c) {
+ try {
+ ZipExtraField ze = (ZipExtraField) c.newInstance();
+ implementations.put(ze.getHeaderId(), c);
+ } catch (ClassCastException cc) {
+ throw new RuntimeException(c + " doesn't implement ZipExtraField");
+ } catch (InstantiationException ie) {
+ throw new RuntimeException(c + " is not a concrete class");
+ } catch (IllegalAccessException ie) {
+ throw new RuntimeException(c + "'s no-arg constructor is not public");
+ }
+ }
+
+ /**
+ * Create an instance of the appropriate ExtraField, falls back to
+ * {@link UnrecognizedExtraField UnrecognizedExtraField}.
+ * @param headerId the header identifier
+ * @return an instance of the appropriate ExtraField
+ * @exception InstantiationException if unable to instantiate the class
+ * @exception IllegalAccessException if not allowed to instantiate the class
+ */
+ public static ZipExtraField createExtraField(ZipShort headerId)
+ throws InstantiationException, IllegalAccessException {
+ Class<?> c = implementations.get(headerId);
+ if (c != null) {
+ return (ZipExtraField) c.newInstance();
+ }
+ UnrecognizedExtraField u = new UnrecognizedExtraField();
+ u.setHeaderId(headerId);
+ return u;
+ }
+
+ /**
+ * Split the array into ExtraFields and populate them with the
+ * given data as local file data, throwing an exception if the
+ * data cannot be parsed.
+ * @param data an array of bytes as it appears in local file data
+ * @return an array of ExtraFields
+ * @throws ZipException on error
+ */
+ public static ZipExtraField[] parse(byte[] data) throws ZipException {
+ return parse(data, true, UnparseableExtraField.THROW);
+ }
+
+ /**
+ * Split the array into ExtraFields and populate them with the
+ * given data, throwing an exception if the data cannot be parsed.
+ * @param data an array of bytes
+ * @param local whether data originates from the local file data
+ * or the central directory
+ * @return an array of ExtraFields
+ * @throws ZipException on error
+ */
+ public static ZipExtraField[] parse(byte[] data, boolean local)
+ throws ZipException {
+ return parse(data, local, UnparseableExtraField.THROW);
+ }
+
+ /**
+ * Split the array into ExtraFields and populate them with the
+ * given data.
+ * @param data an array of bytes
+ * @param local whether data originates from the local file data
+ * or the central directory
+ * @param onUnparseableData what to do if the extra field data
+ * cannot be parsed.
+ * @return an array of ExtraFields
+ * @throws ZipException on error
+ *
+ * @since 1.1
+ */
+ public static ZipExtraField[] parse(byte[] data, boolean local,
+ UnparseableExtraField onUnparseableData)
+ throws ZipException {
+ List<ZipExtraField> v = new ArrayList<ZipExtraField>();
+ int start = 0;
+ LOOP:
+ while (start <= data.length - WORD) {
+ ZipShort headerId = new ZipShort(data, start);
+ int length = new ZipShort(data, start + 2).getValue();
+ if (start + WORD + length > data.length) {
+ switch(onUnparseableData.getKey()) {
+ case UnparseableExtraField.THROW_KEY:
+ throw new ZipException("bad extra field starting at "
+ + start + ". Block length of "
+ + length + " bytes exceeds remaining"
+ + " data of "
+ + (data.length - start - WORD)
+ + " bytes.");
+ case UnparseableExtraField.READ_KEY:
+ UnparseableExtraFieldData field =
+ new UnparseableExtraFieldData();
+ if (local) {
+ field.parseFromLocalFileData(data, start,
+ data.length - start);
+ } else {
+ field.parseFromCentralDirectoryData(data, start,
+ data.length - start);
+ }
+ v.add(field);
+ //$FALL-THROUGH$
+ case UnparseableExtraField.SKIP_KEY:
+ // since we cannot parse the data we must assume
+ // the extra field consumes the whole rest of the
+ // available data
+ break LOOP;
+ default:
+ throw new ZipException("unknown UnparseableExtraField key: "
+ + onUnparseableData.getKey());
+ }
+ }
+ try {
+ ZipExtraField ze = createExtraField(headerId);
+ if (local) {
+ ze.parseFromLocalFileData(data, start + WORD, length);
+ } else {
+ ze.parseFromCentralDirectoryData(data, start + WORD,
+ length);
+ }
+ v.add(ze);
+ } catch (InstantiationException ie) {
+ throw (ZipException) new ZipException(ie.getMessage()).initCause(ie);
+ } catch (IllegalAccessException iae) {
+ throw (ZipException) new ZipException(iae.getMessage()).initCause(iae);
+ }
+ start += length + WORD;
+ }
+
+ ZipExtraField[] result = new ZipExtraField[v.size()];
+ return v.toArray(result);
+ }
+
+ /**
+ * Merges the local file data fields of the given ZipExtraFields.
+ * @param data an array of ExtraFiles
+ * @return an array of bytes
+ */
+ public static byte[] mergeLocalFileDataData(ZipExtraField[] data) {
+ final boolean lastIsUnparseableHolder = data.length > 0
+ && data[data.length - 1] instanceof UnparseableExtraFieldData;
+ int regularExtraFieldCount =
+ lastIsUnparseableHolder ? data.length - 1 : data.length;
+
+ int sum = WORD * regularExtraFieldCount;
+ for (ZipExtraField element : data) {
+ sum += element.getLocalFileDataLength().getValue();
+ }
+
+ byte[] result = new byte[sum];
+ int start = 0;
+ for (int i = 0; i < regularExtraFieldCount; i++) {
+ System.arraycopy(data[i].getHeaderId().getBytes(),
+ 0, result, start, 2);
+ System.arraycopy(data[i].getLocalFileDataLength().getBytes(),
+ 0, result, start + 2, 2);
+ byte[] local = data[i].getLocalFileDataData();
+ System.arraycopy(local, 0, result, start + WORD, local.length);
+ start += local.length + WORD;
+ }
+ if (lastIsUnparseableHolder) {
+ byte[] local = data[data.length - 1].getLocalFileDataData();
+ System.arraycopy(local, 0, result, start, local.length);
+ }
+ return result;
+ }
+
+ /**
+ * Merges the central directory fields of the given ZipExtraFields.
+ * @param data an array of ExtraFields
+ * @return an array of bytes
+ */
+ public static byte[] mergeCentralDirectoryData(ZipExtraField[] data) {
+ final boolean lastIsUnparseableHolder = data.length > 0
+ && data[data.length - 1] instanceof UnparseableExtraFieldData;
+ int regularExtraFieldCount =
+ lastIsUnparseableHolder ? data.length - 1 : data.length;
+
+ int sum = WORD * regularExtraFieldCount;
+ for (ZipExtraField element : data) {
+ sum += element.getCentralDirectoryLength().getValue();
+ }
+ byte[] result = new byte[sum];
+ int start = 0;
+ for (int i = 0; i < regularExtraFieldCount; i++) {
+ System.arraycopy(data[i].getHeaderId().getBytes(),
+ 0, result, start, 2);
+ System.arraycopy(data[i].getCentralDirectoryLength().getBytes(),
+ 0, result, start + 2, 2);
+ byte[] local = data[i].getCentralDirectoryData();
+ System.arraycopy(local, 0, result, start + WORD, local.length);
+ start += local.length + WORD;
+ }
+ if (lastIsUnparseableHolder) {
+ byte[] local = data[data.length - 1].getCentralDirectoryData();
+ System.arraycopy(local, 0, result, start, local.length);
+ }
+ return result;
+ }
+
+ /**
+ * "enum" for the possible actions to take if the extra field
+ * cannot be parsed.
+ *
+ * @since 1.1
+ */
+ public static final class UnparseableExtraField {
+ /**
+ * Key for "throw an exception" action.
+ */
+ public static final int THROW_KEY = 0;
+ /**
+ * Key for "skip" action.
+ */
+ public static final int SKIP_KEY = 1;
+ /**
+ * Key for "read" action.
+ */
+ public static final int READ_KEY = 2;
+
+ /**
+ * Throw an exception if field cannot be parsed.
+ */
+ public static final UnparseableExtraField THROW
+ = new UnparseableExtraField(THROW_KEY);
+
+ /**
+ * Skip the extra field entirely and don't make its data
+ * available - effectively removing the extra field data.
+ */
+ public static final UnparseableExtraField SKIP
+ = new UnparseableExtraField(SKIP_KEY);
+
+ /**
+ * Read the extra field data into an instance of {@link
+ * UnparseableExtraFieldData UnparseableExtraFieldData}.
+ */
+ public static final UnparseableExtraField READ
+ = new UnparseableExtraField(READ_KEY);
+
+ private final int key;
+
+ private UnparseableExtraField(int k) {
+ key = k;
+ }
+
+ /**
+ * Key of the action to take.
+ */
+ public int getKey() { return key; }
+ }
+}
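The wire format parse() expects is header id, little-endian length, then payload. A hypothetical round trip with an unregistered id, which therefore falls back to UnrecognizedExtraField (a sketch; the byte values are made up):

static void demo() throws java.util.zip.ZipException {
    byte[] raw = {
        0x34, 0x12,   // header id 0x1234, little-endian
        0x02, 0x00,   // data length: 2 bytes
        0x0A, 0x0B    // the payload itself
    };

    ZipExtraField[] fields = ExtraFieldUtils.parse(raw);
    UnrecognizedExtraField f = (UnrecognizedExtraField) fields[0];
    System.out.println(f.getHeaderId().getValue());            // 4660 == 0x1234
    System.out.println(f.getLocalFileDataLength().getValue()); // 2
}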
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/FallbackZipEncoding.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/FallbackZipEncoding.java
new file mode 100644
index 000000000..4baae8aba
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/FallbackZipEncoding.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * A fallback ZipEncoding, which uses a java.io means to encode names.
+ *
+ * This implementation is not suitable for encodings other than
+ * UTF-8, because java.io encodes unmappable character as question
+ * marks leading to unreadable ZIP entries on some operating
+ * systems.
+ *
+ * Furthermore this implementation is unable to tell whether a
+ * given name can be safely encoded or not.
+ *
+ * This implementation acts as a last resort implementation, when
+ * neither {@link Simple8BitZipEncoding} nor {@link NioZipEncoding} is
+ * available.
+ *
+ * The methods of this class are reentrant.
+ * @Immutable
+ */
+class FallbackZipEncoding implements ZipEncoding {
+ private final String charsetName;
+
+ /**
+ * Construct a fallback zip encoding, which uses the platform's
+ * default charset.
+ */
+ public FallbackZipEncoding() {
+ this.charsetName = null;
+ }
+
+ /**
+ * Construct a fallback zip encoding, which uses the given charset.
+ *
+ * @param charsetName The name of the charset or {@code null} for
+ * the platform's default character set.
+ */
+ public FallbackZipEncoding(String charsetName) {
+ this.charsetName = charsetName;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#canEncode(java.lang.String)
+ */
+ public boolean canEncode(String name) {
+ return true;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#encode(java.lang.String)
+ */
+ public ByteBuffer encode(String name) throws IOException {
+ if (this.charsetName == null) { // i.e. use default charset, see no-args constructor
+ return ByteBuffer.wrap(name.getBytes());
+ } else {
+ return ByteBuffer.wrap(name.getBytes(this.charsetName));
+ }
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#decode(byte[])
+ */
+ public String decode(byte[] data) throws IOException {
+ if (this.charsetName == null) { // i.e. use default charset, see no-args constructor
+ return new String(data);
+ } else {
+ return new String(data,this.charsetName);
+ }
+ }
+}
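The lossiness warned about in the class comment is easy to demonstrate with plain JDK calls: String.getBytes(charsetName) silently substitutes '?' for anything the charset cannot represent, so names encoded through this fallback may not survive a round trip:

import java.io.UnsupportedEncodingException;

public class FallbackLossDemo {
    public static void main(String[] args) throws UnsupportedEncodingException {
        byte[] enc = "Grüße".getBytes("US-ASCII");       // ü and ß are unmappable
        System.out.println(new String(enc, "US-ASCII")); // prints "Gr??e"
    }
}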
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/GeneralPurposeBit.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/GeneralPurposeBit.java
new file mode 100644
index 000000000..d4b4c3dd3
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/GeneralPurposeBit.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Parser/encoder for the "general purpose bit" field in ZIP's local
+ * file and central directory headers.
+ *
+ * @since 1.1
+ * @NotThreadSafe
+ */
+public final class GeneralPurposeBit {
+
+ /**
+ * Indicates that the file is encrypted.
+ */
+ private static final int ENCRYPTION_FLAG = 1 << 0;
+
+ /**
+ * Indicates the size of the sliding dictionary used by the compression method 6 (imploding).
+ *
+ * - 0: 4096 bytes
+ * - 1: 8192 bytes
+ *
+ */
+ private static final int SLIDING_DICTIONARY_SIZE_FLAG = 1 << 1;
+
+ /**
+ * Indicates the number of Shannon-Fano trees used by the compression method 6 (imploding).
+ *
+ * - 0: 2 trees (lengths, distances)
+ * - 1: 3 trees (literals, lengths, distances)
+ *
+ */
+ private static final int NUMBER_OF_SHANNON_FANO_TREES_FLAG = 1 << 2;
+
+ /**
+ * Indicates that a data descriptor stored after the file contents
+ * will hold CRC and size information.
+ */
+ private static final int DATA_DESCRIPTOR_FLAG = 1 << 3;
+
+ /**
+ * Indicates strong encryption.
+ */
+ private static final int STRONG_ENCRYPTION_FLAG = 1 << 6;
+
+ /**
+ * Indicates that filenames are written in UTF-8.
+ *
+ * The only reason this is public is that {@link
+ * ZipArchiveOutputStream#EFS_FLAG} was public in Apache Commons
+ * Compress 1.0 and we needed a substitute for it.
+ */
+ public static final int UFT8_NAMES_FLAG = 1 << 11;
+
+ private boolean languageEncodingFlag = false;
+ private boolean dataDescriptorFlag = false;
+ private boolean encryptionFlag = false;
+ private boolean strongEncryptionFlag = false;
+ private int slidingDictionarySize;
+ private int numberOfShannonFanoTrees;
+
+ public GeneralPurposeBit() {
+ }
+
+ /**
+ * whether the current entry uses UTF8 for file name and comment.
+ */
+ public boolean usesUTF8ForNames() {
+ return languageEncodingFlag;
+ }
+
+ /**
+ * whether the current entry will use UTF8 for file name and comment.
+ */
+ public void useUTF8ForNames(boolean b) {
+ languageEncodingFlag = b;
+ }
+
+ /**
+ * whether the current entry uses the data descriptor to store CRC
+ * and size information
+ */
+ public boolean usesDataDescriptor() {
+ return dataDescriptorFlag;
+ }
+
+ /**
+ * whether the current entry will use the data descriptor to store
+ * CRC and size information
+ */
+ public void useDataDescriptor(boolean b) {
+ dataDescriptorFlag = b;
+ }
+
+ /**
+ * whether the current entry is encrypted
+ */
+ public boolean usesEncryption() {
+ return encryptionFlag;
+ }
+
+ /**
+ * whether the current entry will be encrypted
+ */
+ public void useEncryption(boolean b) {
+ encryptionFlag = b;
+ }
+
+ /**
+ * whether the current entry is encrypted using strong encryption
+ */
+ public boolean usesStrongEncryption() {
+ return encryptionFlag && strongEncryptionFlag;
+ }
+
+ /**
+ * whether the current entry will be encrypted using strong encryption
+ */
+ public void useStrongEncryption(boolean b) {
+ strongEncryptionFlag = b;
+ if (b) {
+ useEncryption(true);
+ }
+ }
+
+ /**
+ * Returns the sliding dictionary size used by the compression method 6 (imploding).
+ */
+ int getSlidingDictionarySize() {
+ return slidingDictionarySize;
+ }
+
+ /**
+ * Returns the number of trees used by the compression method 6 (imploding).
+ */
+ int getNumberOfShannonFanoTrees() {
+ return numberOfShannonFanoTrees;
+ }
+
+ /**
+ * Encodes the set bits in a form suitable for ZIP archives.
+ */
+ public byte[] encode() {
+ return
+ ZipShort.getBytes((dataDescriptorFlag ? DATA_DESCRIPTOR_FLAG : 0)
+ |
+ (languageEncodingFlag ? UFT8_NAMES_FLAG : 0)
+ |
+ (encryptionFlag ? ENCRYPTION_FLAG : 0)
+ |
+ (strongEncryptionFlag ? STRONG_ENCRYPTION_FLAG : 0)
+ );
+ }
+
+ /**
+ * Parses the supported flags from the given archive data.
+ *
+ * @param data local file header or a central directory entry.
+ * @param offset offset at which the general purpose bit starts
+ */
+ public static GeneralPurposeBit parse(final byte[] data, final int offset) {
+ final int generalPurposeFlag = ZipShort.getValue(data, offset);
+ GeneralPurposeBit b = new GeneralPurposeBit();
+ b.useDataDescriptor((generalPurposeFlag & DATA_DESCRIPTOR_FLAG) != 0);
+ b.useUTF8ForNames((generalPurposeFlag & UFT8_NAMES_FLAG) != 0);
+ b.useStrongEncryption((generalPurposeFlag & STRONG_ENCRYPTION_FLAG) != 0);
+ b.useEncryption((generalPurposeFlag & ENCRYPTION_FLAG) != 0);
+ b.slidingDictionarySize = (generalPurposeFlag & SLIDING_DICTIONARY_SIZE_FLAG) != 0 ? 8192 : 4096;
+ b.numberOfShannonFanoTrees = (generalPurposeFlag & NUMBER_OF_SHANNON_FANO_TREES_FLAG) != 0 ? 3 : 2;
+ return b;
+ }
+
+ @Override
+ public int hashCode() {
+ return 3 * (7 * (13 * (17 * (encryptionFlag ? 1 : 0)
+ + (strongEncryptionFlag ? 1 : 0))
+ + (languageEncodingFlag ? 1 : 0))
+ + (dataDescriptorFlag ? 1 : 0));
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof GeneralPurposeBit)) {
+ return false;
+ }
+ GeneralPurposeBit g = (GeneralPurposeBit) o;
+ return g.encryptionFlag == encryptionFlag
+ && g.strongEncryptionFlag == strongEncryptionFlag
+ && g.languageEncodingFlag == languageEncodingFlag
+ && g.dataDescriptorFlag == dataDescriptorFlag;
+ }
+}
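Decoding a flag word by hand shows where the implode-specific bits land: 0x000E has bits 1, 2 and 3 set, meaning an 8K dictionary, three Shannon-Fano trees, and a trailing data descriptor. A sketch (the two implode getters are package-private, so this assumes same-package access):

// Hypothetical same-package usage.
byte[] word = { 0x0E, 0x00 };   // 0b0000_1110, stored little-endian
GeneralPurposeBit b = GeneralPurposeBit.parse(word, 0);

System.out.println(b.usesDataDescriptor());          // true
System.out.println(b.getSlidingDictionarySize());    // 8192
System.out.println(b.getNumberOfShannonFanoTrees()); // 3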
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/JarMarker.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/JarMarker.java
new file mode 100644
index 000000000..f5dde8510
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/JarMarker.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.zip.ZipException;
+
+/**
+ * If this extra field is added as the very first extra field of the
+ * archive, Solaris will consider it an executable jar file.
+ * @Immutable
+ */
+public final class JarMarker implements ZipExtraField {
+
+ private static final ZipShort ID = new ZipShort(0xCAFE);
+ private static final ZipShort NULL = new ZipShort(0);
+ private static final byte[] NO_BYTES = new byte[0];
+ private static final JarMarker DEFAULT = new JarMarker();
+
+ /** No-arg constructor */
+ public JarMarker() {
+ // empty
+ }
+
+ /**
+ * Since JarMarker is stateless we can always use the same instance.
+ * @return the DEFAULT jarmaker.
+ */
+ public static JarMarker getInstance() {
+ return DEFAULT;
+ }
+
+ /**
+ * The Header-ID.
+ * @return the header id
+ */
+ public ZipShort getHeaderId() {
+ return ID;
+ }
+
+ /**
+ * Length of the extra field in the local file data - without
+ * Header-ID or length specifier.
+ * @return 0
+ */
+ public ZipShort getLocalFileDataLength() {
+ return NULL;
+ }
+
+ /**
+ * Length of the extra field in the central directory - without
+ * Header-ID or length specifier.
+ * @return 0
+ */
+ public ZipShort getCentralDirectoryLength() {
+ return NULL;
+ }
+
+ /**
+ * The actual data to put into local file data - without Header-ID
+ * or length specifier.
+ * @return the data
+ */
+ public byte[] getLocalFileDataData() {
+ return NO_BYTES;
+ }
+
+ /**
+ * The actual data to put central directory - without Header-ID or
+ * length specifier.
+ * @return the data
+ */
+ public byte[] getCentralDirectoryData() {
+ return NO_BYTES;
+ }
+
+ /**
+ * Populate data from this array as if it was in local file data.
+ * @param data an array of bytes
+ * @param offset the start offset
+ * @param length the number of bytes in the array from offset
+ *
+ * @throws ZipException on error
+ */
+ public void parseFromLocalFileData(byte[] data, int offset, int length)
+ throws ZipException {
+ if (length != 0) {
+ throw new ZipException("JarMarker doesn't expect any data");
+ }
+ }
+
+ /**
+ * Doesn't do anything special since this class always uses the
+ * same data in central directory and local file data.
+ */
+ public void parseFromCentralDirectoryData(byte[] buffer, int offset,
+ int length)
+ throws ZipException {
+ parseFromLocalFileData(buffer, offset, length);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/NioZipEncoding.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/NioZipEncoding.java
new file mode 100644
index 000000000..f93192c34
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/NioZipEncoding.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.charset.Charset;
+import java.nio.charset.CharsetEncoder;
+import java.nio.charset.CoderResult;
+import java.nio.charset.CodingErrorAction;
+
+/**
+ * A ZipEncoding, which uses a java.nio {@link
+ * java.nio.charset.Charset Charset} to encode names.
+ *
+ * This implementation works for all cases under java-1.5 or
+ * later. However, in java-1.4, some charsets don't have a java.nio
+ * implementation, most notably the default ZIP encoding Cp437.
+ *
+ * The methods of this class are reentrant.
+ * @Immutable
+ */
+class NioZipEncoding implements ZipEncoding {
+ private final Charset charset;
+
+ /**
+ * Construct an NIO based zip encoding, which wraps the given
+ * charset.
+ *
+ * @param charset The NIO charset to wrap.
+ */
+ public NioZipEncoding(Charset charset) {
+ this.charset = charset;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#canEncode(java.lang.String)
+ */
+ public boolean canEncode(String name) {
+ CharsetEncoder enc = this.charset.newEncoder();
+ enc.onMalformedInput(CodingErrorAction.REPORT);
+ enc.onUnmappableCharacter(CodingErrorAction.REPORT);
+
+ return enc.canEncode(name);
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#encode(java.lang.String)
+ */
+ public ByteBuffer encode(String name) {
+ CharsetEncoder enc = this.charset.newEncoder();
+
+ enc.onMalformedInput(CodingErrorAction.REPORT);
+ enc.onUnmappableCharacter(CodingErrorAction.REPORT);
+
+ CharBuffer cb = CharBuffer.wrap(name);
+ ByteBuffer out = ByteBuffer.allocate(name.length()
+ + (name.length() + 1) / 2);
+
+ while (cb.remaining() > 0) {
+ CoderResult res = enc.encode(cb, out,true);
+
+ if (res.isUnmappable() || res.isMalformed()) {
+
+ // write the unmappable characters in utf-16
+ // pseudo-URL encoding style to ByteBuffer.
+ if (res.length() * 6 > out.remaining()) {
+ out = ZipEncodingHelper.growBuffer(out, out.position()
+ + res.length() * 6);
+ }
+
+ for (int i = 0; i < res.length(); ++i) {
+ ZipEncodingHelper.appendSurrogate(out, cb.get());
+ }
+
+ } else if (res.isOverflow()) {
+
+ out = ZipEncodingHelper.growBuffer(out, 0);
+
+ } else if (res.isUnderflow()) {
+
+ enc.flush(out);
+ break;
+ }
+ }
+
+ out.limit(out.position());
+ out.rewind();
+ return out;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#decode(byte[])
+ */
+ public String decode(byte[] data) throws IOException {
+ return this.charset.newDecoder()
+ .onMalformedInput(CodingErrorAction.REPORT)
+ .onUnmappableCharacter(CodingErrorAction.REPORT)
+ .decode(ByteBuffer.wrap(data)).toString();
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Simple8BitZipEncoding.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Simple8BitZipEncoding.java
new file mode 100644
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Simple8BitZipEncoding.java
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * This ZipEncoding implementation implements a simple 8bit character
+ * set, which meets the following restrictions:
+ *
+ * - Characters 0x0000 to 0x007f are encoded as the corresponding
+ * byte values 0x00 to 0x7f.
+ * - All byte codes from 0x80 to 0xff are mapped to a unique unicode
+ * character in the range 0x0080 to 0x7fff. (No support for
+ * UTF-16 surrogates)
+ *
+ * These restrictions most notably apply to the most prominent
+ * omissions of java-1.4's {@link java.nio.charset.Charset Charset}
+ * implementation, Cp437 and Cp850.
+ *
+ * The methods of this class are reentrant.
+ * @Immutable
+ */
+class Simple8BitZipEncoding implements ZipEncoding {
+
+ /**
+ * A character entity, which is put to the reverse mapping table
+ * of a simple encoding.
+ */
+ private static final class Simple8BitChar implements Comparable<Simple8BitChar> {
+ public final char unicode;
+ public final byte code;
+
+ Simple8BitChar(byte code, char unicode) {
+ this.code = code;
+ this.unicode = unicode;
+ }
+
+ public int compareTo(Simple8BitChar a) {
+ return this.unicode - a.unicode;
+ }
+
+ @Override
+ public String toString() {
+ return "0x" + Integer.toHexString(0xffff & unicode)
+ + "->0x" + Integer.toHexString(0xff & code);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof Simple8BitChar) {
+ Simple8BitChar other = (Simple8BitChar) o;
+ return unicode == other.unicode && code == other.code;
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return unicode;
+ }
+ }
+
+ /**
+ * The characters for byte values of 128 to 255 stored as an array of
+ * 128 chars.
+ */
+ private final char[] highChars;
+
+ /**
+ * A list of {@link Simple8BitChar} objects sorted by the unicode
+ * field. This list is used to binary search reverse mapping of
+ * unicode characters with a character code greater than 127.
+ */
+ private final List<Simple8BitChar> reverseMapping;
+
+ /**
+ * @param highChars The characters for byte values of 128 to 255
+ * stored as an array of 128 chars.
+ */
+ public Simple8BitZipEncoding(char[] highChars) {
+ this.highChars = highChars.clone();
+ List<Simple8BitChar> temp =
+ new ArrayList<Simple8BitChar>(this.highChars.length);
+
+ byte code = 127;
+
+ for (char highChar : this.highChars) {
+ temp.add(new Simple8BitChar(++code, highChar));
+ }
+
+ Collections.sort(temp);
+ this.reverseMapping = Collections.unmodifiableList(temp);
+ }
+
+ /**
+ * Return the character code for a given encoded byte.
+ *
+ * @param b The byte to decode.
+ * @return The associated character value.
+ */
+ public char decodeByte(byte b) {
+ // code 0-127
+ if (b >= 0) {
+ return (char) b;
+ }
+
+ // byte is signed, so 128 == -128 and 255 == -1
+ return this.highChars[128 + b];
+ }
+
+ /**
+ * @param c The character to encode.
+ * @return Whether the given unicode character is covered by this encoding.
+ */
+ public boolean canEncodeChar(char c) {
+
+ if (c >= 0 && c < 128) {
+ return true;
+ }
+
+ Simple8BitChar r = this.encodeHighChar(c);
+ return r != null;
+ }
+
+ /**
+ * Pushes the encoded form of the given character to the given byte buffer.
+ *
+ * @param bb The byte buffer to write to.
+ * @param c The character to encode.
+ * @return Whether the given unicode character is covered by this encoding.
+ * If {@code false} is returned, nothing is pushed to the
+ * byte buffer.
+ */
+ public boolean pushEncodedChar(ByteBuffer bb, char c) {
+
+ if (c >= 0 && c < 128) {
+ bb.put((byte) c);
+ return true;
+ }
+
+ Simple8BitChar r = this.encodeHighChar(c);
+ if (r == null) {
+ return false;
+ }
+ bb.put(r.code);
+ return true;
+ }
+
+ /**
+ * @param c A unicode character in the range from 0x0080 to 0x7f00
+ * @return A Simple8BitChar, if this character is covered by this encoding.
+ * A {@code null} value is returned, if this character is not
+ * covered by this encoding.
+ */
+ private Simple8BitChar encodeHighChar(char c) {
+ // for performance and simplicity, yet another reincarnation of
+ // binary search...
+ int i0 = 0;
+ int i1 = this.reverseMapping.size();
+
+ while (i1 > i0) {
+
+ int i = i0 + (i1 - i0) / 2;
+
+ Simple8BitChar m = this.reverseMapping.get(i);
+
+ if (m.unicode == c) {
+ return m;
+ }
+
+ if (m.unicode < c) {
+ i0 = i + 1;
+ } else {
+ i1 = i;
+ }
+ }
+
+ if (i0 >= this.reverseMapping.size()) {
+ return null;
+ }
+
+ Simple8BitChar r = this.reverseMapping.get(i0);
+
+ if (r.unicode != c) {
+ return null;
+ }
+
+ return r;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#canEncode(java.lang.String)
+ */
+ public boolean canEncode(String name) {
+
+ for (int i = 0; i < name.length(); ++i) {
+
+ char c = name.charAt(i);
+
+ if (!this.canEncodeChar(c)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#encode(java.lang.String)
+ */
+ public ByteBuffer encode(String name) {
+ ByteBuffer out = ByteBuffer.allocate(name.length()
+ + 6 + (name.length() + 1) / 2);
+
+ for (int i = 0; i < name.length(); ++i) {
+
+ char c = name.charAt(i);
+
+ // If we run out of room for the longest possible
+ // encoded character, grow the buffer.
+ if (out.remaining() < 6) {
+ out = ZipEncodingHelper.growBuffer(out, out.position() + 6);
+ }
+
+ if (!this.pushEncodedChar(out, c)) {
+
+ // write the unmappable characters in utf-16
+ // pseudo-URL encoding style to ByteBuffer.
+ ZipEncodingHelper.appendSurrogate(out, c);
+ }
+ }
+
+ out.limit(out.position());
+ out.rewind();
+ return out;
+ }
+
+ /**
+ * @see
+ * org.apache.commons.compress.archivers.zip.ZipEncoding#decode(byte[])
+ */
+ public String decode(byte[] data) throws IOException {
+ char[] ret = new char[data.length];
+
+ for (int i = 0; i < data.length; ++i) {
+ ret[i] = this.decodeByte(data[i]);
+ }
+
+ return new String(ret);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnicodeCommentExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnicodeCommentExtraField.java
new file mode 100644
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnicodeCommentExtraField.java
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Info-ZIP Unicode Comment Extra Field (0x6375):
+ *
+ * Stores the UTF-8 version of the file comment as stored in the
+ * central directory header.
+ *
+ * @see <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">PKWARE's
+ * APPNOTE.TXT, section 4.6.8</a>
+ *
+ * @NotThreadSafe super-class is not thread-safe
+ */
+public class UnicodeCommentExtraField extends AbstractUnicodeExtraField {
+
+ public static final ZipShort UCOM_ID = new ZipShort(0x6375);
+
+ public UnicodeCommentExtraField () {
+ }
+
+ /**
+ * Assemble as unicode comment extension from the name given as
+ * text as well as the encoded bytes actually written to the archive.
+ *
+ * @param text The file name
+ * @param bytes the bytes actually written to the archive
+ * @param off The offset of the encoded comment in bytes.
+ * @param len The length of the encoded comment or comment in
+ * bytes.
+ */
+ public UnicodeCommentExtraField(String text, byte[] bytes, int off,
+ int len) {
+ super(text, bytes, off, len);
+ }
+
+ /**
+ * Assemble as unicode comment extension from the comment given as
+ * text as well as the bytes actually written to the archive.
+ *
+ * @param comment The file comment
+ * @param bytes the bytes actually written to the archive
+ */
+ public UnicodeCommentExtraField(String comment, byte[] bytes) {
+ super(comment, bytes);
+ }
+
+ public ZipShort getHeaderId() {
+ return UCOM_ID;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnicodePathExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnicodePathExtraField.java
new file mode 100644
index 000000000..a60ccb2d8
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnicodePathExtraField.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Info-ZIP Unicode Path Extra Field (0x7075):
+ *
+ * Stores the UTF-8 version of the file name field as stored in the
+ * local header and central directory header.
+ *
+ * @see <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">PKWARE's
+ * APPNOTE.TXT, section 4.6.9</a>
+ *
+ * @NotThreadSafe super-class is not thread-safe
+ */
+public class UnicodePathExtraField extends AbstractUnicodeExtraField {
+
+ public static final ZipShort UPATH_ID = new ZipShort(0x7075);
+
+ public UnicodePathExtraField () {
+ }
+
+ /**
+ * Assemble as unicode path extension from the name given as
+ * text as well as the encoded bytes actually written to the archive.
+ *
+ * @param text The file name
+ * @param bytes the bytes actually written to the archive
+ * @param off The offset of the encoded filename in bytes.
+ * @param len The length of the encoded filename or comment in
+ * bytes.
+ */
+ public UnicodePathExtraField(String text, byte[] bytes, int off, int len) {
+ super(text, bytes, off, len);
+ }
+
+ /**
+ * Assemble as unicode path extension from the name given as
+ * text as well as the encoded bytes actually written to the archive.
+ *
+ * @param name The file name
+ * @param bytes the bytes actually written to the archive
+ */
+ public UnicodePathExtraField(String name, byte[] bytes) {
+ super(name, bytes);
+ }
+
+ public ZipShort getHeaderId() {
+ return UPATH_ID;
+ }
+}
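A writer would attach this field when the charset used for the main header cannot hold the real name: the superclass records the UTF-8 form of the text plus a CRC-32 of the header bytes, so a reader can detect a stale field. A hedged sketch (the Cp437 fallback charset is an assumption for illustration, not something this codebase prescribes):

static UnicodePathExtraField demo() throws java.io.UnsupportedEncodingException {
    String name = "żółw.txt";
    byte[] headerBytes = name.getBytes("Cp437"); // possibly lossy header bytes (assumed fallback)
    // carries the UTF-8 name and a CRC-32 of headerBytes for validation
    return new UnicodePathExtraField(name, headerBytes);
}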
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnixStat.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnixStat.java
new file mode 100644
index 000000000..b8afc6bc8
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnixStat.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Constants from stat.h on Unix systems.
+ */
+// CheckStyle:InterfaceIsTypeCheck OFF - backward compatible
+public interface UnixStat {
+
+ /**
+ * Bits used for permissions (and sticky bit)
+ */
+ int PERM_MASK = 07777;
+ /**
+ * Indicates symbolic links.
+ */
+ int LINK_FLAG = 0120000;
+ /**
+ * Indicates plain files.
+ */
+ int FILE_FLAG = 0100000;
+ /**
+ * Indicates directories.
+ */
+ int DIR_FLAG = 040000;
+
+ // ----------------------------------------------------------
+ // somewhat arbitrary choices that are quite common for shared
+ // installations
+ // -----------------------------------------------------------
+
+ /**
+ * Default permissions for symbolic links.
+ */
+ int DEFAULT_LINK_PERM = 0777;
+
+ /**
+ * Default permissions for directories.
+ */
+ int DEFAULT_DIR_PERM = 0755;
+
+ /**
+ * Default permissions for plain files.
+ */
+ int DEFAULT_FILE_PERM = 0644;
+}
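These constants compose into a single Unix mode word; ZIP tools in the Info-ZIP tradition then store that word in the upper 16 bits of an entry's external attributes (stated here as background on the format, not as this codebase's API):

int fileMode = UnixStat.FILE_FLAG | UnixStat.DEFAULT_FILE_PERM; // 0100644
int dirMode  = UnixStat.DIR_FLAG  | UnixStat.DEFAULT_DIR_PERM;  // 040755

// a Unix-style writer places the mode in the high half-word:
long externalAttributes = ((long) fileMode) << 16;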
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnparseableExtraFieldData.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnparseableExtraFieldData.java
new file mode 100644
index 000000000..029cfcdaf
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnparseableExtraFieldData.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Wrapper for extra field data that doesn't conform to the recommended format of header-tag + size + data.
+ *
+ * The header-id is artificial (and not listed as a known ID in APPNOTE.TXT). Since it isn't used anywhere
+ * except to satisfy the ZipExtraField contract it shouldn't matter anyway.
+ *
+ * @since 1.1
+ * @NotThreadSafe
+ */
+public final class UnparseableExtraFieldData implements ZipExtraField {
+ private static final ZipShort HEADER_ID = new ZipShort(0xACC1);
+
+ private byte[] localFileData;
+ private byte[] centralDirectoryData;
+
+ /**
+ * The Header-ID.
+ *
+ * @return a completely arbitrary value that should be ignored.
+ */
+ public ZipShort getHeaderId() {
+ return HEADER_ID;
+ }
+
+ /**
+ * Length of the complete extra field in the local file data.
+ *
+ * @return The LocalFileDataLength value
+ */
+ public ZipShort getLocalFileDataLength() {
+ return new ZipShort(localFileData == null ? 0 : localFileData.length);
+ }
+
+ /**
+ * Length of the complete extra field in the central directory.
+ *
+ * @return The CentralDirectoryLength value
+ */
+ public ZipShort getCentralDirectoryLength() {
+ return centralDirectoryData == null
+ ? getLocalFileDataLength()
+ : new ZipShort(centralDirectoryData.length);
+ }
+
+ /**
+ * The actual data to put into local file data.
+ *
+ * @return The LocalFileDataData value
+ */
+ public byte[] getLocalFileDataData() {
+ return ZipUtil.copy(localFileData);
+ }
+
+ /**
+ * The actual data to put into central directory.
+ *
+ * @return The CentralDirectoryData value
+ */
+ public byte[] getCentralDirectoryData() {
+ return centralDirectoryData == null
+ ? getLocalFileDataData() : ZipUtil.copy(centralDirectoryData);
+ }
+
+ /**
+ * Populate data from this array as if it was in local file data.
+ *
+ * @param buffer the buffer to read data from
+ * @param offset offset into buffer to read data
+ * @param length the length of data
+ */
+ public void parseFromLocalFileData(byte[] buffer, int offset, int length) {
+ localFileData = new byte[length];
+ System.arraycopy(buffer, offset, localFileData, 0, length);
+ }
+
+ /**
+ * Populate data from this array as if it was in central directory data.
+ *
+ * @param buffer the buffer to read data from
+ * @param offset offset into buffer to read data
+ * @param length the length of data
+ */
+ public void parseFromCentralDirectoryData(byte[] buffer, int offset,
+ int length) {
+ centralDirectoryData = new byte[length];
+ System.arraycopy(buffer, offset, centralDirectoryData, 0, length);
+ if (localFileData == null) {
+ parseFromLocalFileData(buffer, offset, length);
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnrecognizedExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnrecognizedExtraField.java
new file mode 100644
index 000000000..e0bdf199d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnrecognizedExtraField.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Simple placeholder for all those extra fields we don't want to deal
+ * with.
+ *
+ * Assumes local file data and central directory entries are
+ * identical - unless told the opposite.
+ * @NotThreadSafe
+ */
+public class UnrecognizedExtraField implements ZipExtraField {
+
+ /**
+ * The Header-ID.
+ */
+ private ZipShort headerId;
+
+ /**
+ * Set the header id.
+ * @param headerId the header id to use
+ */
+ public void setHeaderId(ZipShort headerId) {
+ this.headerId = headerId;
+ }
+
+ /**
+ * Get the header id.
+ * @return the header id
+ */
+ public ZipShort getHeaderId() {
+ return headerId;
+ }
+
+ /**
+ * Extra field data in local file data - without
+ * Header-ID or length specifier.
+ */
+ private byte[] localData;
+
+ /**
+ * Set the extra field data in the local file data -
+ * without Header-ID or length specifier.
+ * @param data the field data to use
+ */
+ public void setLocalFileDataData(byte[] data) {
+ localData = ZipUtil.copy(data);
+ }
+
+ /**
+ * Get the length of the local data.
+ * @return the length of the local data
+ */
+ public ZipShort getLocalFileDataLength() {
+ return new ZipShort(localData.length);
+ }
+
+ /**
+ * Get the local data.
+ * @return the local data
+ */
+ public byte[] getLocalFileDataData() {
+ return ZipUtil.copy(localData);
+ }
+
+ /**
+ * Extra field data in central directory - without
+ * Header-ID or length specifier.
+ */
+ private byte[] centralData;
+
+ /**
+ * Set the extra field data in central directory.
+ * @param data the data to use
+ */
+ public void setCentralDirectoryData(byte[] data) {
+ centralData = ZipUtil.copy(data);
+ }
+
+ /**
+ * Get the central data length.
+ * If there is no central data, get the local file data length.
+ * @return the central data length
+ */
+ public ZipShort getCentralDirectoryLength() {
+ if (centralData != null) {
+ return new ZipShort(centralData.length);
+ }
+ return getLocalFileDataLength();
+ }
+
+ /**
+ * Get the central data.
+ * @return the central data if present, else return the local file data
+ */
+ public byte[] getCentralDirectoryData() {
+ if (centralData != null) {
+ return ZipUtil.copy(centralData);
+ }
+ return getLocalFileDataData();
+ }
+
+ /**
+ * @param data the array of bytes.
+ * @param offset the source location in the data array.
+ * @param length the number of bytes to use in the data array.
+ * @see ZipExtraField#parseFromLocalFileData(byte[], int, int)
+ */
+ public void parseFromLocalFileData(byte[] data, int offset, int length) {
+ byte[] tmp = new byte[length];
+ System.arraycopy(data, offset, tmp, 0, length);
+ setLocalFileDataData(tmp);
+ }
+
+ /**
+ * @param data the array of bytes.
+ * @param offset the source location in the data array.
+ * @param length the number of bytes to use in the data array.
+ * @see ZipExtraField#parseFromCentralDirectoryData(byte[], int, int)
+ */
+ public void parseFromCentralDirectoryData(byte[] data, int offset,
+ int length) {
+ byte[] tmp = new byte[length];
+ System.arraycopy(data, offset, tmp, 0, length);
+ setCentralDirectoryData(tmp);
+ if (localData == null) {
+ setLocalFileDataData(tmp);
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnshrinkingInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnshrinkingInputStream.java
new file mode 100644
index 000000000..0bd4db347
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnshrinkingInputStream.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.compressors.z._internal_.InternalLZWInputStream;
+
+/**
+ * Input stream that decompresses ZIP method 1 (unshrinking). A variation of the LZW algorithm, with some twists.
+ * @NotThreadSafe
+ * @since 1.7
+ */
+class UnshrinkingInputStream extends InternalLZWInputStream {
+ private static final int MAX_CODE_SIZE = 13;
+ private static final int MAX_TABLE_SIZE = 1 << MAX_CODE_SIZE;
+ private final boolean[] isUsed;
+
+ public UnshrinkingInputStream(InputStream inputStream) throws IOException {
+ super(inputStream);
+ setClearCode(codeSize);
+ initializeTables(MAX_CODE_SIZE);
+ isUsed = new boolean[prefixes.length];
+ for (int i = 0; i < (1 << 8); i++) {
+ isUsed[i] = true;
+ }
+ tableSize = clearCode + 1;
+ }
+
+ @Override
+ protected int addEntry(int previousCode, byte character) throws IOException {
+ while ((tableSize < MAX_TABLE_SIZE) && isUsed[tableSize]) {
+ tableSize++;
+ }
+ int idx = addEntry(previousCode, character, MAX_TABLE_SIZE);
+ if (idx >= 0) {
+ isUsed[idx] = true;
+ }
+ return idx;
+ }
+
+ private void partialClear() {
+ final boolean[] isParent = new boolean[MAX_TABLE_SIZE];
+ for (int i = 0; i < isUsed.length; i++) {
+ if (isUsed[i] && prefixes[i] != -1) {
+ isParent[prefixes[i]] = true;
+ }
+ }
+ for (int i = clearCode + 1; i < isParent.length; i++) {
+ if (!isParent[i]) {
+ isUsed[i] = false;
+ prefixes[i] = -1;
+ }
+ }
+ }
+
+ @Override
+ protected int decompressNextSymbol() throws IOException {
+ //
+ // table entry table entry
+ // _____________ _____
+ // table entry / \ / \
+ // ____________/ \ \
+ // / / \ / \ \
+ // +---+---+---+---+---+---+---+---+---+---+
+ // | . | . | . | . | . | . | . | . | . | . |
+ // +---+---+---+---+---+---+---+---+---+---+
+ // |<--------->|<------------->|<----->|<->|
+ // symbol symbol symbol symbol
+ //
+ final int code = readNextCode();
+ if (code < 0) {
+ return -1;
+ } else if (code == clearCode) {
+ final int subCode = readNextCode();
+ if (subCode < 0) {
+ throw new IOException("Unexpected EOF;");
+ } else if (subCode == 1) {
+ if (codeSize < MAX_CODE_SIZE) {
+ codeSize++;
+ } else {
+ throw new IOException("Attempt to increase code size beyond maximum");
+ }
+ } else if (subCode == 2) {
+ partialClear();
+ tableSize = clearCode + 1;
+ } else {
+ throw new IOException("Invalid clear code subcode " + subCode);
+ }
+ return 0;
+ } else {
+ boolean addedUnfinishedEntry = false;
+ int effectiveCode = code;
+ if (!isUsed[code]) {
+ effectiveCode = addRepeatOfPreviousCode();
+ addedUnfinishedEntry = true;
+ }
+ return expandCodeToOutputStack(effectiveCode, addedUnfinishedEntry);
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnsupportedZipFeatureException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnsupportedZipFeatureException.java
new file mode 100644
index 000000000..b1aad770c
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/UnsupportedZipFeatureException.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.zip.ZipException;
+
+/**
+ * Exception thrown when attempting to read or write data for a zip
+ * entry that uses ZIP features not supported by this library.
+ * @since 1.1
+ */
+public class UnsupportedZipFeatureException extends ZipException {
+
+ private final Feature reason;
+ private final ZipArchiveEntry entry;
+ private static final long serialVersionUID = 20130101L;
+
+ /**
+ * Creates an exception.
+ * @param reason the feature that is not supported
+ * @param entry the entry using the feature
+ */
+ public UnsupportedZipFeatureException(Feature reason,
+ ZipArchiveEntry entry) {
+ super("unsupported feature " + reason + " used in entry "
+ + entry.getName());
+ this.reason = reason;
+ this.entry = entry;
+ }
+
+ /**
+ * Creates an exception for archives that use an unsupported
+ * compression algorithm.
+ * @param method the method that is not supported
+ * @param entry the entry using the feature
+ * @since 1.5
+ */
+ public UnsupportedZipFeatureException(ZipMethod method,
+ ZipArchiveEntry entry) {
+ super("unsupported feature method '" + method.name()
+ + "' used in entry " + entry.getName());
+ this.reason = Feature.METHOD;
+ this.entry = entry;
+ }
+
+ /**
+ * Creates an exception when the whole archive uses an unsupported
+ * feature.
+ *
+ * @param reason the feature that is not supported
+ * @since 1.5
+ */
+ public UnsupportedZipFeatureException(Feature reason) {
+ super("unsupported feature " + reason + " used in archive.");
+ this.reason = reason;
+ this.entry = null;
+ }
+
+ /**
+ * The unsupported feature that has been used.
+ */
+ public Feature getFeature() {
+ return reason;
+ }
+
+ /**
+ * The entry using the unsupported feature.
+ */
+ public ZipArchiveEntry getEntry() {
+ return entry;
+ }
+
+ /**
+ * ZIP Features that may or may not be supported.
+ * @since 1.1
+ */
+ public static class Feature {
+ /**
+ * The entry is encrypted.
+ */
+ public static final Feature ENCRYPTION = new Feature("encryption");
+ /**
+ * The entry used an unsupported compression method.
+ */
+ public static final Feature METHOD = new Feature("compression method");
+ /**
+ * The entry uses a data descriptor.
+ */
+ public static final Feature DATA_DESCRIPTOR = new Feature("data descriptor");
+ /**
+ * The archive uses splitting or spanning.
+ * @since 1.5
+ */
+ public static final Feature SPLITTING = new Feature("splitting");
+
+ private final String name;
+
+ private Feature(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String toString() {
+ return name;
+ }
+ }
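+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): a typical use is signalling an encrypted entry that this
+ * library cannot read.
+ */
+ private static UnsupportedZipFeatureException forEncryptedEntry(ZipArchiveEntry entry) {
+ return new UnsupportedZipFeatureException(Feature.ENCRYPTION, entry);
+ }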
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/X5455_ExtendedTimestamp.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/X5455_ExtendedTimestamp.java
new file mode 100644
index 000000000..b1ac3ee43
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/X5455_ExtendedTimestamp.java
@@ -0,0 +1,587 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.Serializable;
+import java.util.Date;
+import java.util.zip.ZipException;
+
+/**
+ * An extra field that stores additional file and directory timestamp data
+ * for zip entries. Each zip entry can include up to three timestamps
+ * (modify, access, create*). The timestamps are stored as 32 bit unsigned
+ * integers representing seconds since UNIX epoch (Jan 1st, 1970, UTC).
+ * This field improves on zip's default timestamp granularity, since it
+ * allows one to store additional timestamps, and, in addition, the timestamps
+ * are stored using per-second granularity (zip's default behaviour can only store
+ * timestamps to the nearest even second).
+ *
+ * Unfortunately, 32 (unsigned) bits can only store dates up to the year 2106,
+ * and so this extra field will eventually be obsolete. Enjoy it while it lasts!
+ *
+ *
+ * - modifyTime:
+ * most recent time of file/directory modification
+ * (or file/dir creation if the entry has not been
+ * modified since it was created).
+ *
+ * - accessTime:
+ * most recent time file/directory was opened
+ * (e.g., read from disk). Many operating systems are
+ * configured not to update this value (via the NOATIME
+ * mount option) to optimize disk behaviour, so it is not
+ * always reliable. In those cases it's always equal to
+ * modifyTime.
+ *
+ * - *createTime:
+ * modern linux file systems (e.g., ext2 and newer)
+ * do not appear to store a value like this, and so
+ * it's usually omitted altogether in the zip extra
+ * field. Perhaps other unix systems track this.
+ *
+ *
+ * We're using the field definition given in Info-Zip's source archive:
+ * zip-3.0.tar.gz/proginfo/extrafld.txt
+ *
+ *
+ * Value Size Description
+ * ----- ---- -----------
+ * 0x5455 Short tag for this extra block type ("UT")
+ * TSize Short total data size for this block
+ * Flags Byte info bits
+ * (ModTime) Long time of last modification (UTC/GMT)
+ * (AcTime) Long time of last access (UTC/GMT)
+ * (CrTime) Long time of original creation (UTC/GMT)
+ *
+ * Central-header version:
+ *
+ * Value Size Description
+ * ----- ---- -----------
+ * 0x5455 Short tag for this extra block type ("UT")
+ * TSize Short total data size for this block
+ * Flags Byte info bits (refers to local header!)
+ * (ModTime) Long time of last modification (UTC/GMT)
+ *
+ * @since 1.5
+ */
+public class X5455_ExtendedTimestamp implements ZipExtraField, Cloneable, Serializable {
+ private static final ZipShort HEADER_ID = new ZipShort(0x5455);
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The bit set inside the flags when the last modification time
+ * is present in this extra field.
+ */
+ public static final byte MODIFY_TIME_BIT = 1;
+ /**
+ * The bit set inside the flags when the last access time is
+ * present in this extra field.
+ */
+ public static final byte ACCESS_TIME_BIT = 2;
+ /**
+ * The bit set inside the flags when the original creation time
+ * is present in this extra field.
+ */
+ public static final byte CREATE_TIME_BIT = 4;
+
+ // The 3 boolean fields (below) come from this flags byte. The remaining 5 bits
+ // are ignored according to the current version of the spec (December 2012).
+ private byte flags;
+
+ // Note: even if bit1 and bit2 are set, the Central data will still not contain
+ // access/create fields: only local data ever holds those! This causes
+ // some of our implementation to look a little odd, with seemingly spurious
+ // != null and length checks.
+ private boolean bit0_modifyTimePresent;
+ private boolean bit1_accessTimePresent;
+ private boolean bit2_createTimePresent;
+
+ private ZipLong modifyTime;
+ private ZipLong accessTime;
+ private ZipLong createTime;
+
+ /**
+ * Constructor for X5455_ExtendedTimestamp.
+ */
+ public X5455_ExtendedTimestamp() {}
+
+ /**
+ * The Header-ID.
+ *
+ * @return the value for the header id for this extrafield
+ */
+ public ZipShort getHeaderId() {
+ return HEADER_ID;
+ }
+
+ /**
+ * Length of the extra field in the local file data - without
+ * Header-ID or length specifier.
+ *
+ * @return a ZipShort for the length of the data of this extra field
+ */
+ public ZipShort getLocalFileDataLength() {
+ return new ZipShort(1 +
+ (bit0_modifyTimePresent ? 4 : 0) +
+ (bit1_accessTimePresent && accessTime != null ? 4 : 0) +
+ (bit2_createTimePresent && createTime != null ? 4 : 0)
+ );
+ }
+
+ /**
+ * Length of the extra field in the local file data - without
+ * Header-ID or length specifier.
+ *
+ * For X5455 the central length is often smaller than the
+ * local length, because central cannot contain access or create
+ * timestamps.
+ *
+ * @return a ZipShort for the length of the data of this extra field
+ */
+ public ZipShort getCentralDirectoryLength() {
+ return new ZipShort(1 +
+ (bit0_modifyTimePresent ? 4 : 0)
+ );
+ }
+
+ /**
+ * The actual data to put into local file data - without Header-ID
+ * or length specifier.
+ *
+ * @return get the data
+ */
+ public byte[] getLocalFileDataData() {
+ byte[] data = new byte[getLocalFileDataLength().getValue()];
+ int pos = 0;
+ data[pos++] = 0;
+ if (bit0_modifyTimePresent) {
+ data[0] |= MODIFY_TIME_BIT;
+ System.arraycopy(modifyTime.getBytes(), 0, data, pos, 4);
+ pos += 4;
+ }
+ if (bit1_accessTimePresent && accessTime != null) {
+ data[0] |= ACCESS_TIME_BIT;
+ System.arraycopy(accessTime.getBytes(), 0, data, pos, 4);
+ pos += 4;
+ }
+ if (bit2_createTimePresent && createTime != null) {
+ data[0] |= CREATE_TIME_BIT;
+ System.arraycopy(createTime.getBytes(), 0, data, pos, 4);
+ pos += 4;
+ }
+ return data;
+ }
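+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): a field carrying only a modify time serializes to 5 bytes -
+ * one flags byte (0x01) followed by the 4-byte little-endian seconds
+ * value, matching the layout table in the class javadoc.
+ */
+ private static byte[] exampleModifyOnlyPayload() {
+ X5455_ExtendedTimestamp ts = new X5455_ExtendedTimestamp();
+ ts.setModifyJavaTime(new Date(1356998400000L)); // 2013-01-01T00:00:00Z
+ return ts.getLocalFileDataData(); // {0x01, t0, t1, t2, t3}
+ }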
+
+ /**
+ * The actual data to put into central directory data - without Header-ID
+ * or length specifier.
+ *
+ * @return the central directory data
+ */
+ public byte[] getCentralDirectoryData() {
+ byte[] centralData = new byte[getCentralDirectoryLength().getValue()];
+ byte[] localData = getLocalFileDataData();
+
+ // Truncate out create & access time (last 8 bytes) from
+ // the copy of the local data we obtained:
+ System.arraycopy(localData, 0, centralData, 0, centralData.length);
+ return centralData;
+ }
+
+ /**
+ * Populate data from this array as if it was in local file data.
+ *
+ * @param data an array of bytes
+ * @param offset the start offset
+ * @param length the number of bytes in the array from offset
+ * @throws java.util.zip.ZipException on error
+ */
+ public void parseFromLocalFileData(
+ byte[] data, int offset, int length
+ ) throws ZipException {
+ reset();
+ final int len = offset + length;
+ setFlags(data[offset++]);
+ if (bit0_modifyTimePresent) {
+ modifyTime = new ZipLong(data, offset);
+ offset += 4;
+ }
+
+ // Notice the extra length check in case we are parsing the shorter
+ // central data field (for both access and create timestamps).
+ if (bit1_accessTimePresent && offset + 4 <= len) {
+ accessTime = new ZipLong(data, offset);
+ offset += 4;
+ }
+ if (bit2_createTimePresent && offset + 4 <= len) {
+ createTime = new ZipLong(data, offset);
+ offset += 4;
+ }
+ }
+
+ /**
+ * Doesn't do anything special since this class always uses the
+ * same parsing logic for both central directory and local file data.
+ */
+ public void parseFromCentralDirectoryData(
+ byte[] buffer, int offset, int length
+ ) throws ZipException {
+ reset();
+ parseFromLocalFileData(buffer, offset, length);
+ }
+
+ /**
+ * Reset state back to newly constructed state. Helps us make sure
+ * parse() calls always generate clean results.
+ */
+ private void reset() {
+ setFlags((byte) 0);
+ this.modifyTime = null;
+ this.accessTime = null;
+ this.createTime = null;
+ }
+
+ /**
+ * Sets flags byte. The flags byte tells us which of the
+ * three datestamp fields are present in the data:
+ *
+ * bit0 - modify time
+ * bit1 - access time
+ * bit2 - create time
+ *
+ * Only first 3 bits of flags are used according to the
+ * latest version of the spec (December 2012).
+ *
+ * @param flags flags byte indicating which of the
+ * three datestamp fields are present.
+ */
+ public void setFlags(byte flags) {
+ this.flags = flags;
+ this.bit0_modifyTimePresent = (flags & MODIFY_TIME_BIT) == MODIFY_TIME_BIT;
+ this.bit1_accessTimePresent = (flags & ACCESS_TIME_BIT) == ACCESS_TIME_BIT;
+ this.bit2_createTimePresent = (flags & CREATE_TIME_BIT) == CREATE_TIME_BIT;
+ }
+
+ /**
+ * Gets flags byte. The flags byte tells us which of the
+ * three datestamp fields are present in the data:
+ *
+ * bit0 - modify time
+ * bit1 - access time
+ * bit2 - create time
+ *
+ * Only first 3 bits of flags are used according to the
+ * latest version of the spec (December 2012).
+ *
+ * @return flags byte indicating which of the
+ * three datestamp fields are present.
+ */
+ public byte getFlags() { return flags; }
+
+ /**
+ * Returns whether bit0 of the flags byte is set or not,
+ * which should correspond to the presence or absence of
+ * a modify timestamp in this particular zip entry.
+ *
+ * @return true if bit0 of the flags byte is set.
+ */
+ public boolean isBit0_modifyTimePresent() { return bit0_modifyTimePresent; }
+
+ /**
+ * Returns whether bit1 of the flags byte is set or not,
+ * which should correspond to the presence or absence of
+ * a "last access" timestamp in this particular zip entry.
+ *
+ * @return true if bit1 of the flags byte is set.
+ */
+ public boolean isBit1_accessTimePresent() { return bit1_accessTimePresent; }
+
+ /**
+ * Returns whether bit2 of the flags byte is set or not,
+ * which should correspond to the presence or absence of
+ * a create timestamp in this particular zip entry.
+ *
+ * @return true if bit2 of the flags byte is set.
+ */
+ public boolean isBit2_createTimePresent() { return bit2_createTimePresent; }
+
+ /**
+ * Returns the modify time (seconds since epoch) of this zip entry
+ * as a ZipLong object, or null if no such timestamp exists in the
+ * zip entry.
+ *
+ * @return modify time (seconds since epoch) or null.
+ */
+ public ZipLong getModifyTime() { return modifyTime; }
+
+ /**
+ * Returns the access time (seconds since epoch) of this zip entry
+ * as a ZipLong object, or null if no such timestamp exists in the
+ * zip entry.
+ *
+ * @return access time (seconds since epoch) or null.
+ */
+ public ZipLong getAccessTime() { return accessTime; }
+
+ /**
+ *
+ * Returns the create time (seconds since epoch) of this zip entry
+ * as a ZipLong object, or null if no such timestamp exists in the
+ * zip entry.
+ *
+ * Note: modern linux file systems (e.g., ext2)
+ * do not appear to store a "create time" value, and so
+ * it's usually omitted altogether in the zip extra
+ * field. Perhaps other unix systems track this.
+ *
+ * @return create time (seconds since epoch) or null.
+ */
+ public ZipLong getCreateTime() { return createTime; }
+
+ /**
+ * Returns the modify time as a java.util.Date
+ * of this zip entry, or null if no such timestamp exists in the zip entry.
+ * The milliseconds are always zeroed out, since the underlying data
+ * offers only per-second precision.
+ *
+ * @return modify time as java.util.Date or null.
+ */
+ public Date getModifyJavaTime() {
+ return modifyTime != null ? new Date(modifyTime.getValue() * 1000) : null;
+ }
+
+ /**
+ * Returns the access time as a java.util.Date
+ * of this zip entry, or null if no such timestamp exists in the zip entry.
+ * The milliseconds are always zeroed out, since the underlying data
+ * offers only per-second precision.
+ *
+ * @return access time as java.util.Date or null.
+ */
+ public Date getAccessJavaTime() {
+ return accessTime != null ? new Date(accessTime.getValue() * 1000) : null;
+ }
+
+ /**
+ *
+ * Returns the create time as a java.util.Date
+ * of this zip entry, or null if no such timestamp exists in the zip entry.
+ * The milliseconds are always zeroed out, since the underlying data
+ * offers only per-second precision.
+ *
+ * Note: modern linux file systems (e.g., ext2)
+ * do not appear to store a "create time" value, and so
+ * it's usually omitted altogether in the zip extra
+ * field. Perhaps other unix systems track this.
+ *
+ * @return create time as java.util.Date or null.
+ */
+ public Date getCreateJavaTime() {
+ return createTime != null ? new Date(createTime.getValue() * 1000) : null;
+ }
+
+ /**
+ *
+ * Sets the modify time (seconds since epoch) of this zip entry
+ * using a ZipLong object.
+ *
+ * Note: the setters for flags and timestamps are decoupled.
+ * Even if the timestamp is not-null, it will only be written
+ * out if the corresponding bit in the flags is also set.
+ *
+ *
+ * @param l ZipLong of the modify time (seconds since epoch)
+ */
+ public void setModifyTime(ZipLong l) {
+ bit0_modifyTimePresent = l != null;
+ flags = (byte) (l != null ? (flags | MODIFY_TIME_BIT)
+ : (flags & ~MODIFY_TIME_BIT));
+ this.modifyTime = l;
+ }
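+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): because the flags and the timestamps are decoupled, clearing
+ * the flags suppresses a timestamp that has already been set - the
+ * local payload shrinks back to the single flags byte.
+ */
+ private static int exampleDecoupledFlags() {
+ X5455_ExtendedTimestamp ts = new X5455_ExtendedTimestamp();
+ ts.setModifyJavaTime(new Date(0L)); // sets bit0 and the timestamp
+ ts.setFlags((byte) 0); // clears bit0; the timestamp itself stays set
+ return ts.getLocalFileDataLength().getValue(); // 1
+ }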
+
+ /**
+ *
+ * Sets the access time (seconds since epoch) of this zip entry
+ * using a ZipLong object.
+ *
+ * Note: the setters for flags and timestamps are decoupled.
+ * Even if the timestamp is not-null, it will only be written
+ * out if the corresponding bit in the flags is also set.
+ *
+ *
+ * @param l ZipLong of the access time (seconds since epoch)
+ */
+ public void setAccessTime(ZipLong l) {
+ bit1_accessTimePresent = l != null;
+ flags = (byte) (l != null ? (flags | ACCESS_TIME_BIT)
+ : (flags & ~ACCESS_TIME_BIT));
+ this.accessTime = l;
+ }
+
+ /**
+ *
+ * Sets the create time (seconds since epoch) of this zip entry
+ * using a ZipLong object.
+ *
+ * Note: the setters for flags and timestamps are decoupled.
+ * Even if the timestamp is not-null, it will only be written
+ * out if the corresponding bit in the flags is also set.
+ *
+ *
+ * @param l ZipLong of the create time (seconds since epoch)
+ */
+ public void setCreateTime(ZipLong l) {
+ bit2_createTimePresent = l != null;
+ flags = (byte) (l != null ? (flags | CREATE_TIME_BIT)
+ : (flags & ~CREATE_TIME_BIT));
+ this.createTime = l;
+ }
+
+ /**
+ *
+ * Sets the modify time as a java.util.Date
+ * of this zip entry. Supplied value is truncated to per-second
+ * precision (milliseconds zeroed-out).
+ *
+ * Note: the setters for flags and timestamps are decoupled.
+ * Even if the timestamp is not-null, it will only be written
+ * out if the corresponding bit in the flags is also set.
+ *
+ *
+ * @param d modify time as java.util.Date
+ */
+ public void setModifyJavaTime(Date d) { setModifyTime(dateToZipLong(d)); }
+
+ /**
+ *
+ * Sets the access time as a java.util.Date
+ * of this zip entry. Supplied value is truncated to per-second
+ * precision (milliseconds zeroed-out).
+ *
+ * Note: the setters for flags and timestamps are decoupled.
+ * Even if the timestamp is not-null, it will only be written
+ * out if the corresponding bit in the flags is also set.
+ *
+ *
+ * @param d access time as java.util.Date
+ */
+ public void setAccessJavaTime(Date d) { setAccessTime(dateToZipLong(d)); }
+
+ /**
+ *
+ * Sets the create time as a java.util.Date
+ * of this zip entry. Supplied value is truncated to per-second
+ * precision (milliseconds zeroed-out).
+ *
+ * Note: the setters for flags and timestamps are decoupled.
+ * Even if the timestamp is not-null, it will only be written
+ * out if the corresponding bit in the flags is also set.
+ *
+ *
+ * @param d create time as java.util.Date
+ */
+ public void setCreateJavaTime(Date d) { setCreateTime(dateToZipLong(d)); }
+
+ /**
+ * Utility method converts java.util.Date (milliseconds since epoch)
+ * into a ZipLong (seconds since epoch).
+ *
+ * Also makes sure the converted ZipLong is not too big to fit
+ * in 32 unsigned bits.
+ *
+ * @param d java.util.Date to convert to ZipLong
+ * @return ZipLong
+ */
+ private static ZipLong dateToZipLong(final Date d) {
+ if (d == null) { return null; }
+
+ final long TWO_TO_32 = 0x100000000L;
+ final long l = d.getTime() / 1000;
+ if (l >= TWO_TO_32) {
+ throw new IllegalArgumentException("Cannot set an X5455 timestamp larger than 2^32: " + l);
+ }
+ return new ZipLong(l);
+ }
+
+ /**
+ * Returns a String representation of this class useful for
+ * debugging purposes.
+ *
+ * @return A String representation of this class useful for
+ * debugging purposes.
+ */
+ @Override
+ public String toString() {
+ StringBuilder buf = new StringBuilder();
+ buf.append("0x5455 Zip Extra Field: Flags=");
+ buf.append(Integer.toBinaryString(ZipUtil.signedByteToUnsignedInt(flags))).append(" ");
+ if (bit0_modifyTimePresent && modifyTime != null) {
+ Date m = getModifyJavaTime();
+ buf.append(" Modify:[").append(m).append("] ");
+ }
+ if (bit1_accessTimePresent && accessTime != null) {
+ Date a = getAccessJavaTime();
+ buf.append(" Access:[").append(a).append("] ");
+ }
+ if (bit2_createTimePresent && createTime != null) {
+ Date c = getCreateJavaTime();
+ buf.append(" Create:[").append(c).append("] ");
+ }
+ return buf.toString();
+ }
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ return super.clone();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof X5455_ExtendedTimestamp) {
+ X5455_ExtendedTimestamp xf = (X5455_ExtendedTimestamp) o;
+
+ // The ZipLong == ZipLong clauses handle the cases where both are null,
+ // and only the last 3 bits of the flags matter.
+ return ((flags & 0x07) == (xf.flags & 0x07)) &&
+ (modifyTime == xf.modifyTime || (modifyTime != null && modifyTime.equals(xf.modifyTime))) &&
+ (accessTime == xf.accessTime || (accessTime != null && accessTime.equals(xf.accessTime))) &&
+ (createTime == xf.createTime || (createTime != null && createTime.equals(xf.createTime)));
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ int hc = (-123 * (flags & 0x07)); // only last 3 bits of flags matter
+ if (modifyTime != null) {
+ hc ^= modifyTime.hashCode();
+ }
+ if (accessTime != null) {
+ // Since accessTime is often same as modifyTime,
+ // this prevents them from XOR negating each other.
+ hc ^= Integer.rotateLeft(accessTime.hashCode(), 11);
+ }
+ if (createTime != null) {
+ hc ^= Integer.rotateLeft(createTime.hashCode(), 22);
+ }
+ return hc;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/X7875_NewUnix.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/X7875_NewUnix.java
new file mode 100644
index 000000000..87d1e1d40
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/X7875_NewUnix.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.Serializable;
+import java.math.BigInteger;
+import java.util.zip.ZipException;
+
+import static org.apache.commons.compress.archivers.zip.ZipUtil.reverse;
+import static org.apache.commons.compress.archivers.zip.ZipUtil.signedByteToUnsignedInt;
+import static org.apache.commons.compress.archivers.zip.ZipUtil.unsignedIntToSignedByte;
+
+/**
+ * An extra field that stores UNIX UID/GID data (owner & group ownership) for a given
+ * zip entry. We're using the field definition given in Info-Zip's source archive:
+ * zip-3.0.tar.gz/proginfo/extrafld.txt
+ *
+ *
+ * Value Size Description
+ * ----- ---- -----------
+ * 0x7875 Short tag for this extra block type ("ux")
+ * TSize Short total data size for this block
+ * Version 1 byte version of this extra field, currently 1
+ * UIDSize 1 byte Size of UID field
+ * UID Variable UID for this entry (little endian)
+ * GIDSize 1 byte Size of GID field
+ * GID Variable GID for this entry (little endian)
+ *
+ * @since 1.5
+ */
+public class X7875_NewUnix implements ZipExtraField, Cloneable, Serializable {
+ private static final ZipShort HEADER_ID = new ZipShort(0x7875);
+ private static final BigInteger ONE_THOUSAND = BigInteger.valueOf(1000);
+ private static final long serialVersionUID = 1L;
+
+ private int version = 1; // always '1' according to current info-zip spec.
+
+ // BigInteger helps us with little-endian / big-endian conversions.
+ // (thanks to BigInteger.toByteArray() and a reverse() method we created).
+ // Also, the spec theoretically allows UID/GID up to 255 bytes long!
+ //
+ // NOTE: equals() and hashCode() currently assume these can never be null.
+ private BigInteger uid;
+ private BigInteger gid;
+
+ /**
+ * Constructor for X7875_NewUnix.
+ */
+ public X7875_NewUnix() {
+ reset();
+ }
+
+ /**
+ * The Header-ID.
+ *
+ * @return the value for the header id for this extrafield
+ */
+ public ZipShort getHeaderId() {
+ return HEADER_ID;
+ }
+
+ /**
+ * Gets the UID as a long. UID is typically a 32 bit unsigned
+ * value on most UNIX systems, so we return a long to avoid
+ * integer overflow into the negatives in case values above
+ * and including 2^31 are being used.
+ *
+ * @return the UID value.
+ */
+ public long getUID() { return ZipUtil.bigToLong(uid); }
+
+ /**
+ * Gets the GID as a long. GID is typically a 32 bit unsigned
+ * value on most UNIX systems, so we return a long to avoid
+ * integer overflow into the negatives in case values above
+ * and including 2^31 are being used.
+ *
+ * @return the GID value.
+ */
+ public long getGID() { return ZipUtil.bigToLong(gid); }
+
+ /**
+ * Sets the UID.
+ *
+ * @param l UID value to set on this extra field.
+ */
+ public void setUID(long l) {
+ this.uid = ZipUtil.longToBig(l);
+ }
+
+ /**
+ * Sets the GID.
+ *
+ * @param l GID value to set on this extra field.
+ */
+ public void setGID(long l) {
+ this.gid = ZipUtil.longToBig(l);
+ }
+
+ /**
+ * Length of the extra field in the local file data - without
+ * Header-ID or length specifier.
+ *
+ * @return a ZipShort for the length of the data of this extra field
+ */
+ public ZipShort getLocalFileDataLength() {
+ int uidSize = trimLeadingZeroesForceMinLength(uid.toByteArray()).length;
+ int gidSize = trimLeadingZeroesForceMinLength(gid.toByteArray()).length;
+
+ // The 3 comes from: version=1 + uidsize=1 + gidsize=1
+ return new ZipShort(3 + uidSize + gidSize);
+ }
+
+ /**
+ * Length of the extra field in the central directory data - without
+ * Header-ID or length specifier.
+ *
+ * @return a ZipShort for the length of the data of this extra field
+ */
+ public ZipShort getCentralDirectoryLength() {
+ return getLocalFileDataLength(); // No different than local version.
+ }
+
+ /**
+ * The actual data to put into local file data - without Header-ID
+ * or length specifier.
+ *
+ * @return get the data
+ */
+ public byte[] getLocalFileDataData() {
+ byte[] uidBytes = uid.toByteArray();
+ byte[] gidBytes = gid.toByteArray();
+
+ // BigInteger might prepend a leading-zero to force a positive representation
+ // (e.g., so that the sign-bit is set to zero). We need to remove that
+ // before sending the number over the wire.
+ uidBytes = trimLeadingZeroesForceMinLength(uidBytes);
+ gidBytes = trimLeadingZeroesForceMinLength(gidBytes);
+
+ // Couldn't bring myself to just call getLocalFileDataLength() when we've
+ // already got the arrays right here. Yeah, yeah, I know, premature
+ // optimization is the root of all...
+ //
+ // The 3 comes from: version=1 + uidsize=1 + gidsize=1
+ byte[] data = new byte[3 + uidBytes.length + gidBytes.length];
+
+ // reverse() switches byte array from big-endian to little-endian.
+ reverse(uidBytes);
+ reverse(gidBytes);
+
+ int pos = 0;
+ data[pos++] = unsignedIntToSignedByte(version);
+ data[pos++] = unsignedIntToSignedByte(uidBytes.length);
+ System.arraycopy(uidBytes, 0, data, pos, uidBytes.length);
+ pos += uidBytes.length;
+ data[pos++] = unsignedIntToSignedByte(gidBytes.length);
+ System.arraycopy(gidBytes, 0, data, pos, gidBytes.length);
+ return data;
+ }
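+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): the default UID/GID of 1000 (0x03E8) serializes to 7 bytes -
+ * {version=1, uidSize=2, 0xE8, 0x03, gidSize=2, 0xE8, 0x03} - with
+ * both values trimmed to two bytes and stored little-endian.
+ */
+ private static byte[] exampleDefaultPayload() {
+ X7875_NewUnix field = new X7875_NewUnix(); // reset() -> uid = gid = 1000
+ return field.getLocalFileDataData();
+ }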
+
+ /**
+ * The actual data to put into central directory data - without Header-ID
+ * or length specifier.
+ *
+ * @return get the data
+ */
+ public byte[] getCentralDirectoryData() {
+ return getLocalFileDataData();
+ }
+
+ /**
+ * Populate data from this array as if it was in local file data.
+ *
+ * @param data an array of bytes
+ * @param offset the start offset
+ * @param length the number of bytes in the array from offset
+ * @throws java.util.zip.ZipException on error
+ */
+ public void parseFromLocalFileData(
+ byte[] data, int offset, int length
+ ) throws ZipException {
+ reset();
+ this.version = signedByteToUnsignedInt(data[offset++]);
+ int uidSize = signedByteToUnsignedInt(data[offset++]);
+ byte[] uidBytes = new byte[uidSize];
+ System.arraycopy(data, offset, uidBytes, 0, uidSize);
+ offset += uidSize;
+ this.uid = new BigInteger(1, reverse(uidBytes)); // sign-bit forced positive
+
+ int gidSize = signedByteToUnsignedInt(data[offset++]);
+ byte[] gidBytes = new byte[gidSize];
+ System.arraycopy(data, offset, gidBytes, 0, gidSize);
+ this.gid = new BigInteger(1, reverse(gidBytes)); // sign-bit forced positive
+ }
+
+ /**
+ * Doesn't do anything special since this class always uses the
+ * same data in central directory and local file data.
+ */
+ public void parseFromCentralDirectoryData(
+ byte[] buffer, int offset, int length
+ ) throws ZipException {
+ reset();
+ parseFromLocalFileData(buffer, offset, length);
+ }
+
+ /**
+ * Reset state back to newly constructed state. Helps us make sure
+ * parse() calls always generate clean results.
+ */
+ private void reset() {
+ // Typical UID/GID of the first non-root user created on a unix system.
+ uid = ONE_THOUSAND;
+ gid = ONE_THOUSAND;
+ }
+
+ /**
+ * Returns a String representation of this class useful for
+ * debugging purposes.
+ *
+ * @return A String representation of this class useful for
+ * debugging purposes.
+ */
+ @Override
+ public String toString() {
+ return "0x7875 Zip Extra Field: UID=" + uid + " GID=" + gid;
+ }
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ return super.clone();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof X7875_NewUnix) {
+ X7875_NewUnix xf = (X7875_NewUnix) o;
+ // We assume uid and gid can never be null.
+ return version == xf.version && uid.equals(xf.uid) && gid.equals(xf.gid);
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ int hc = -1234567 * version;
+ // Since most UIDs and GIDs are below 65,536, this is (hopefully!)
+ // a nice way to make sure typical UID and GID values impact the hash
+ // as much as possible.
+ hc ^= Integer.rotateLeft(uid.hashCode(), 16);
+ hc ^= gid.hashCode();
+ return hc;
+ }
+
+ /**
+ * Not really for external usage, but marked "package" visibility
+ * to help us JUnit it. Trims a byte array of leading zeroes while
+ * also enforcing a minimum length, and thus it really trims AND pads
+ * at the same time.
+ *
+ * @param array byte[] array to trim & pad.
+ * @return trimmed & padded byte[] array.
+ */
+ static byte[] trimLeadingZeroesForceMinLength(byte[] array) {
+ if (array == null) {
+ return array;
+ }
+
+ int pos = 0;
+ for (byte b : array) {
+ if (b == 0) {
+ pos++;
+ } else {
+ break;
+ }
+ }
+
+ /*
+
+ I agonized over my choice of MIN_LENGTH=1. Here's the situation:
+ InfoZip (the tool I am using to test interop) always sets these
+ to length=4. And so a UID of 0 (typically root) for example is
+ encoded as {4,0,0,0,0} (len=4, 32 bits of zero), when it could just
+ as easily be encoded as {1,0} (len=1, 8 bits of zero) according to
+ the spec.
+
+ In the end I decided on MIN_LENGTH=1 for four reasons:
+
+ 1.) We are adhering to the spec as far as I can tell, and so
+ a consumer that cannot parse this is broken.
+
+ 2.) Fundamentally, zip files are about shrinking things, so
+ let's save a few bytes per entry while we can.
+
+ 3.) Of all the people creating zip files using commons-
+ compress, how many care about UNIX UID/GID attributes
+ of the files they store? (e.g., I am probably thinking
+ way too hard about this and no one cares!)
+
+ 4.) InfoZip's tool, even though it carefully stores every UID/GID
+ for every file zipped on a unix machine (by default) currently
+ appears unable to ever restore UID/GID.
+ unzip -X has no effect on my machine, even when run as root!!!!
+
+ And thus it is decided: MIN_LENGTH=1.
+
+ If anyone runs into interop problems from this, feel free to set
+ it to MIN_LENGTH=4 at some future time, and then we will behave
+ exactly like InfoZip (requires changes to unit tests, though).
+
+ And I am sorry that the time you spent reading this comment is now
+ gone and you can never have it back.
+
+ */
+ final int MIN_LENGTH = 1;
+
+ byte[] trimmedArray = new byte[Math.max(MIN_LENGTH, array.length - pos)];
+ int startPos = trimmedArray.length - (array.length - pos);
+ System.arraycopy(array, pos, trimmedArray, startPos, trimmedArray.length - startPos);
+ return trimmedArray;
+ }
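+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): {0, 0, 1} trims to {1}, while {0} stays {0} because the
+ * result is padded back up to MIN_LENGTH.
+ */
+ private static void exampleTrimBehaviour() {
+ byte[] trimmed = trimLeadingZeroesForceMinLength(new byte[] {0, 0, 1}); // {1}
+ byte[] padded = trimLeadingZeroesForceMinLength(new byte[] {0}); // {0}
+ }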
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64ExtendedInformationExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64ExtendedInformationExtraField.java
new file mode 100644
index 000000000..a75395863
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64ExtendedInformationExtraField.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.zip.ZipException;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
+
+/**
+ * Holds size and other extended information for entries that use Zip64
+ * features.
+ *
+ * Currently Commons Compress doesn't support encrypting the
+ * central directory so the note in APPNOTE.TXT about masking doesn't
+ * apply.
+ *
+ * The implementation relies on data being read from the local file
+ * header and assumes that both size values are always present.
+ *
+ * @see "PKWARE's APPNOTE.TXT, section 4.5.3"
+ *
+ * @since 1.2
+ * @NotThreadSafe
+ */
+public class Zip64ExtendedInformationExtraField implements ZipExtraField {
+
+ static final ZipShort HEADER_ID = new ZipShort(0x0001);
+
+ private static final String LFH_MUST_HAVE_BOTH_SIZES_MSG =
+ "Zip64 extended information must contain"
+ + " both size values in the local file header.";
+ private static final byte[] EMPTY = new byte[0];
+
+ private ZipEightByteInteger size, compressedSize, relativeHeaderOffset;
+ private ZipLong diskStart;
+
+ /**
+ * Stored in {@link #parseFromCentralDirectoryData
+ * parseFromCentralDirectoryData} so it can be reused when ZipFile
+ * calls {@link #reparseCentralDirectoryData
+ * reparseCentralDirectoryData}.
+ *
+ * Not used for anything else
+ *
+ * @since 1.3
+ */
+ private byte[] rawCentralDirectoryData;
+
+ /**
+ * This constructor should only be used by the code that reads
+ * archives inside of Commons Compress.
+ */
+ public Zip64ExtendedInformationExtraField() { }
+
+ /**
+ * Creates an extra field based on the original and compressed size.
+ *
+ * @param size the entry's original size
+ * @param compressedSize the entry's compressed size
+ *
+ * @throws IllegalArgumentException if size or compressedSize is null
+ */
+ public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
+ ZipEightByteInteger compressedSize) {
+ this(size, compressedSize, null, null);
+ }
+
+ /**
+ * Creates an extra field based on all four possible values.
+ *
+ * @param size the entry's original size
+ * @param compressedSize the entry's compressed size
+ * @param relativeHeaderOffset the entry's relative header offset (may be null)
+ * @param diskStart the number of the disk the entry starts on (may be null)
+ *
+ * @throws IllegalArgumentException if size or compressedSize is null
+ */
+ public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
+ ZipEightByteInteger compressedSize,
+ ZipEightByteInteger relativeHeaderOffset,
+ ZipLong diskStart) {
+ this.size = size;
+ this.compressedSize = compressedSize;
+ this.relativeHeaderOffset = relativeHeaderOffset;
+ this.diskStart = diskStart;
+ }
+
+ public ZipShort getHeaderId() {
+ return HEADER_ID;
+ }
+
+ public ZipShort getLocalFileDataLength() {
+ return new ZipShort(size != null ? 2 * DWORD : 0);
+ }
+
+ public ZipShort getCentralDirectoryLength() {
+ return new ZipShort((size != null ? DWORD : 0)
+ + (compressedSize != null ? DWORD : 0)
+ + (relativeHeaderOffset != null ? DWORD : 0)
+ + (diskStart != null ? WORD : 0));
+ }
+
+ public byte[] getLocalFileDataData() {
+ if (size != null || compressedSize != null) {
+ if (size == null || compressedSize == null) {
+ throw new IllegalArgumentException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
+ }
+ byte[] data = new byte[2 * DWORD];
+ addSizes(data);
+ return data;
+ }
+ return EMPTY;
+ }
+
+ public byte[] getCentralDirectoryData() {
+ byte[] data = new byte[getCentralDirectoryLength().getValue()];
+ int off = addSizes(data);
+ if (relativeHeaderOffset != null) {
+ System.arraycopy(relativeHeaderOffset.getBytes(), 0, data, off, DWORD);
+ off += DWORD;
+ }
+ if (diskStart != null) {
+ System.arraycopy(diskStart.getBytes(), 0, data, off, WORD);
+ off += WORD;
+ }
+ return data;
+ }
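+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): with both sizes and the relative header offset present, the
+ * central directory data is 3 * DWORD = 24 bytes - one of the lengths
+ * parseFromCentralDirectoryData can recognize unambiguously.
+ */
+ private static int exampleCentralLength() {
+ Zip64ExtendedInformationExtraField f =
+ new Zip64ExtendedInformationExtraField(
+ new ZipEightByteInteger(4294967296L), // original size, 4 GiB
+ new ZipEightByteInteger(4294967296L), // compressed size
+ new ZipEightByteInteger(0), // relative header offset
+ null); // no disk start
+ return f.getCentralDirectoryLength().getValue(); // 24
+ }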
+
+ public void parseFromLocalFileData(byte[] buffer, int offset, int length)
+ throws ZipException {
+ if (length == 0) {
+ // no local file data at all, may happen if an archive
+ // only holds a ZIP64 extended information extra field
+ // inside the central directory but not inside the local
+ // file header
+ return;
+ }
+ if (length < 2 * DWORD) {
+ throw new ZipException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
+ }
+ size = new ZipEightByteInteger(buffer, offset);
+ offset += DWORD;
+ compressedSize = new ZipEightByteInteger(buffer, offset);
+ offset += DWORD;
+ int remaining = length - 2 * DWORD;
+ if (remaining >= DWORD) {
+ relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
+ offset += DWORD;
+ remaining -= DWORD;
+ }
+ if (remaining >= WORD) {
+ diskStart = new ZipLong(buffer, offset);
+ offset += WORD;
+ remaining -= WORD;
+ }
+ }
+
+ public void parseFromCentralDirectoryData(byte[] buffer, int offset,
+ int length)
+ throws ZipException {
+ // store for processing in reparseCentralDirectoryData
+ rawCentralDirectoryData = new byte[length];
+ System.arraycopy(buffer, offset, rawCentralDirectoryData, 0, length);
+
+ // if there is no size information in here, we are screwed and
+ // can only hope things will get resolved by LFH data later
+ // But there are some cases that can be detected
+ // * all data is there
+ // * length == 24 -> both sizes and offset
+ // * length % 8 == 4 -> at least we can identify the diskStart field
+ if (length >= 3 * DWORD + WORD) {
+ parseFromLocalFileData(buffer, offset, length);
+ } else if (length == 3 * DWORD) {
+ size = new ZipEightByteInteger(buffer, offset);
+ offset += DWORD;
+ compressedSize = new ZipEightByteInteger(buffer, offset);
+ offset += DWORD;
+ relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
+ } else if (length % DWORD == WORD) {
+ diskStart = new ZipLong(buffer, offset + length - WORD);
+ }
+ }
+
+ /**
+ * Parses the raw bytes read from the central directory extra
+ * field with knowledge which fields are expected to be there.
+ *
+ * All four fields inside the zip64 extended information extra
+ * field are optional and must only be present if their corresponding
+ * entry inside the central directory contains the correct magic
+ * value.
+ */
+ public void reparseCentralDirectoryData(boolean hasUncompressedSize,
+ boolean hasCompressedSize,
+ boolean hasRelativeHeaderOffset,
+ boolean hasDiskStart)
+ throws ZipException {
+ if (rawCentralDirectoryData != null) {
+ int expectedLength = (hasUncompressedSize ? DWORD : 0)
+ + (hasCompressedSize ? DWORD : 0)
+ + (hasRelativeHeaderOffset ? DWORD : 0)
+ + (hasDiskStart ? WORD : 0);
+ if (rawCentralDirectoryData.length < expectedLength) {
+ throw new ZipException("central directory zip64 extended"
+ + " information extra field's length"
+ + " doesn't match central directory"
+ + " data. Expected length "
+ + expectedLength + " but is "
+ + rawCentralDirectoryData.length);
+ }
+ int offset = 0;
+ if (hasUncompressedSize) {
+ size = new ZipEightByteInteger(rawCentralDirectoryData, offset);
+ offset += DWORD;
+ }
+ if (hasCompressedSize) {
+ compressedSize = new ZipEightByteInteger(rawCentralDirectoryData,
+ offset);
+ offset += DWORD;
+ }
+ if (hasRelativeHeaderOffset) {
+ relativeHeaderOffset =
+ new ZipEightByteInteger(rawCentralDirectoryData, offset);
+ offset += DWORD;
+ }
+ if (hasDiskStart) {
+ diskStart = new ZipLong(rawCentralDirectoryData, offset);
+ offset += WORD;
+ }
+ }
+ }
+
+ /**
+ * The uncompressed size stored in this extra field.
+ */
+ public ZipEightByteInteger getSize() {
+ return size;
+ }
+
+ /**
+ * The uncompressed size stored in this extra field.
+ */
+ public void setSize(ZipEightByteInteger size) {
+ this.size = size;
+ }
+
+ /**
+ * The compressed size stored in this extra field.
+ */
+ public ZipEightByteInteger getCompressedSize() {
+ return compressedSize;
+ }
+
+ /**
+ * The compressed size stored in this extra field.
+ */
+ public void setCompressedSize(ZipEightByteInteger compressedSize) {
+ this.compressedSize = compressedSize;
+ }
+
+ /**
+ * The relative header offset stored in this extra field.
+ */
+ public ZipEightByteInteger getRelativeHeaderOffset() {
+ return relativeHeaderOffset;
+ }
+
+ /**
+ * The relative header offset stored in this extra field.
+ */
+ public void setRelativeHeaderOffset(ZipEightByteInteger rho) {
+ relativeHeaderOffset = rho;
+ }
+
+ /**
+ * The disk start number stored in this extra field.
+ */
+ public ZipLong getDiskStartNumber() {
+ return diskStart;
+ }
+
+ /**
+ * The disk start number stored in this extra field.
+ */
+ public void setDiskStartNumber(ZipLong ds) {
+ diskStart = ds;
+ }
+
+ private int addSizes(byte[] data) {
+ int off = 0;
+ if (size != null) {
+ System.arraycopy(size.getBytes(), 0, data, 0, DWORD);
+ off += DWORD;
+ }
+ if (compressedSize != null) {
+ System.arraycopy(compressedSize.getBytes(), 0, data, off, DWORD);
+ off += DWORD;
+ }
+ return off;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64Mode.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64Mode.java
new file mode 100644
index 000000000..d051e8982
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64Mode.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * The different modes {@link ZipArchiveOutputStream} can operate in.
+ *
+ * @see ZipArchiveOutputStream#setUseZip64
+ *
+ * @since 1.3
+ */
+public enum Zip64Mode {
+ /**
+ * Use Zip64 extensions for all entries, even if it is clear it is
+ * not required.
+ */
+ Always,
+ /**
+ * Don't use Zip64 extensions for any entries.
+ *
+ * This will cause a {@link Zip64RequiredException} to be
+ * thrown if {@link ZipArchiveOutputStream} detects it needs Zip64
+ * support.
+ */
+ Never,
+ /**
+ * Use Zip64 extensions for all entries where they are required;
+ * don't use them for entries that clearly don't require them.
+ */
+ AsNeeded
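+ // Usage sketch, assuming a ZipArchiveOutputStream named zipOutput:
+ // zipOutput.setUseZip64(Zip64Mode.AsNeeded); - avoids Zip64 overhead
+ // for small entries while still permitting entries that need it.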
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64RequiredException.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64RequiredException.java
new file mode 100644
index 000000000..677b4e6e5
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/Zip64RequiredException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.zip.ZipException;
+
+/**
+ * Exception thrown when attempting to write data that requires Zip64
+ * support to an archive and {@link ZipArchiveOutputStream#setUseZip64
+ * UseZip64} has been set to {@link Zip64Mode#Never Never}.
+ * @since 1.3
+ */
+public class Zip64RequiredException extends ZipException {
+
+ private static final long serialVersionUID = 20110809L;
+
+ /**
+ * Helper to format "entry too big" messages.
+ */
+ static String getEntryTooBigMessage(ZipArchiveEntry ze) {
+ return ze.getName() + "'s size exceeds the limit of 4GByte.";
+ }
+
+ static final String ARCHIVE_TOO_BIG_MESSAGE =
+ "archive's size exceeds the limit of 4GByte.";
+
+ static final String TOO_MANY_ENTRIES_MESSAGE =
+ "archive contains more than 65535 entries.";
+
+ public Zip64RequiredException(String reason) {
+ super(reason);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveEntry.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveEntry.java
new file mode 100644
index 000000000..8dbc10191
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveEntry.java
@@ -0,0 +1,701 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.zip.ZipException;
+
+/**
+ * Extension that adds better handling of extra fields and provides
+ * access to the internal and external file attributes.
+ *
+ * The extra data is expected to follow the recommendation of
+ * APPNOTE.TXT:
+ *
+ * - the extra byte array consists of a sequence of extra fields
+ * - each extra field starts with a two byte header id followed by
+ * a two byte sequence holding the length of the remainder of
+ * data.
+ *
+ *
+ * Any extra data that cannot be parsed by the rules above will be
+ * consumed as "unparseable" extra data and treated differently by the
+ * methods of this class. Versions prior to Apache Commons Compress
+ * 1.1 would have thrown an exception if any attempt was made to read
+ * or write extra data not conforming to the recommendation.
+ *
+ * @NotThreadSafe
+ */
+public class ZipArchiveEntry extends java.util.zip.ZipEntry
+ implements ArchiveEntry {
+
+ public static final int PLATFORM_UNIX = 3;
+ public static final int PLATFORM_FAT = 0;
+ private static final int SHORT_MASK = 0xFFFF;
+ private static final int SHORT_SHIFT = 16;
+ private static final byte[] EMPTY = new byte[0];
+
+ /**
+ * The {@link java.util.zip.ZipEntry} base class only supports
+ * the compression methods STORED and DEFLATED. We override the
+ * field so that any compression methods can be used.
+ *
+ * The default value -1 means that the method has not been specified.
+ *
+ * @see COMPRESS-93
+ */
+ private int method = -1;
+
+ /**
+ * The {@link java.util.zip.ZipEntry#setSize} method in the base
+ * class throws an IllegalArgumentException if the size is bigger
+ * than 2GB for Java versions < 7. Need to keep our own size
+ * information for Zip64 support.
+ */
+ private long size = SIZE_UNKNOWN;
+
+ private int internalAttributes = 0;
+ private int platform = PLATFORM_FAT;
+ private long externalAttributes = 0;
+ private LinkedHashMap<ZipShort, ZipExtraField> extraFields = null;
+ private UnparseableExtraFieldData unparseableExtra = null;
+ private String name = null;
+ private byte[] rawName = null;
+ private GeneralPurposeBit gpb = new GeneralPurposeBit();
+
+ /**
+ * Creates a new zip entry with the specified name.
+ *
+ * Assumes the entry represents a directory if and only if the
+ * name ends with a forward slash "/".
+ *
+ * @param name the name of the entry
+ */
+ public ZipArchiveEntry(String name) {
+ super(name);
+ setName(name);
+ }
+
+ /**
+ * Creates a new zip entry with fields taken from the specified zip entry.
+ *
+ * Assumes the entry represents a directory if and only if the
+ * name ends with a forward slash "/".
+ *
+ * @param entry the entry to get fields from
+ * @throws ZipException on error
+ */
+ public ZipArchiveEntry(java.util.zip.ZipEntry entry) throws ZipException {
+ super(entry);
+ setName(entry.getName());
+ byte[] extra = entry.getExtra();
+ if (extra != null) {
+ setExtraFields(ExtraFieldUtils.parse(extra, true,
+ ExtraFieldUtils
+ .UnparseableExtraField.READ));
+ } else {
+ // initializes extra data to an empty byte array
+ setExtra();
+ }
+ setMethod(entry.getMethod());
+ this.size = entry.getSize();
+ }
+
+ /**
+ * Creates a new zip entry with fields taken from the specified zip entry.
+ *
+ * Assumes the entry represents a directory if and only if the
+ * name ends with a forward slash "/".
+ *
+ * @param entry the entry to get fields from
+ * @throws ZipException on error
+ */
+ public ZipArchiveEntry(ZipArchiveEntry entry) throws ZipException {
+ this((java.util.zip.ZipEntry) entry);
+ setInternalAttributes(entry.getInternalAttributes());
+ setExternalAttributes(entry.getExternalAttributes());
+ setExtraFields(entry.getExtraFields(true));
+ }
+
+ /**
+ * Creates a new zip entry with an empty name.
+ */
+ protected ZipArchiveEntry() {
+ this("");
+ }
+
+ /**
+ * Creates a new zip entry taking some information from the given
+ * file and using the provided name.
+ *
+ * The name will be adjusted to end with a forward slash "/" if
+ * the file is a directory. If the file is not a directory a
+ * potential trailing forward slash will be stripped from the
+ * entry name.
+ *
+ * @param inputFile file to create the entry from
+ * @param entryName name of the entry
+ */
+ public ZipArchiveEntry(File inputFile, String entryName) {
+ this(inputFile.isDirectory() && !entryName.endsWith("/") ?
+ entryName + "/" : entryName);
+ if (inputFile.isFile()){
+ setSize(inputFile.length());
+ }
+ setTime(inputFile.lastModified());
+ // TODO are there any other fields we can set here?
+ }
+
+ /**
+ * Overrides clone.
+ * @return a cloned copy of this ZipArchiveEntry
+ */
+ @Override
+ public Object clone() {
+ ZipArchiveEntry e = (ZipArchiveEntry) super.clone();
+
+ e.setInternalAttributes(getInternalAttributes());
+ e.setExternalAttributes(getExternalAttributes());
+ e.setExtraFields(getExtraFields(true));
+ return e;
+ }
+
+ /**
+ * Returns the compression method of this entry, or -1 if the
+ * compression method has not been specified.
+ *
+ * @return compression method
+ *
+ * @since 1.1
+ */
+ @Override
+ public int getMethod() {
+ return method;
+ }
+
+ /**
+ * Sets the compression method of this entry.
+ *
+ * @param method compression method
+ *
+ * @since 1.1
+ */
+ @Override
+ public void setMethod(int method) {
+ if (method < 0) {
+ throw new IllegalArgumentException(
+ "ZIP compression method can not be negative: " + method);
+ }
+ this.method = method;
+ }
+
+ /**
+ * Retrieves the internal file attributes.
+ *
+ * @return the internal file attributes
+ */
+ public int getInternalAttributes() {
+ return internalAttributes;
+ }
+
+ /**
+ * Sets the internal file attributes.
+ * @param value an int value
+ */
+ public void setInternalAttributes(int value) {
+ internalAttributes = value;
+ }
+
+ /**
+ * Retrieves the external file attributes.
+ * @return the external file attributes
+ */
+ public long getExternalAttributes() {
+ return externalAttributes;
+ }
+
+ /**
+ * Sets the external file attributes.
+ * @param value a long value
+ */
+ public void setExternalAttributes(long value) {
+ externalAttributes = value;
+ }
+
+ /**
+ * Sets Unix permissions in a way that is understood by Info-Zip's
+ * unzip command.
+ * @param mode an int value
+ */
+ public void setUnixMode(int mode) {
+ // CheckStyle:MagicNumberCheck OFF - no point
+ setExternalAttributes((mode << SHORT_SHIFT)
+ // MS-DOS read-only attribute
+ | ((mode & 0200) == 0 ? 1 : 0)
+ // MS-DOS directory flag
+ | (isDirectory() ? 0x10 : 0));
+ // CheckStyle:MagicNumberCheck ON
+ platform = PLATFORM_UNIX;
+ }
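+
+ /**
+ * Illustrative sketch (hypothetical helper, not part of the upstream
+ * API): a regular file entry with mode 0644 ends up with external
+ * attributes 0x1A40000 - the mode in the high 16 bits and neither the
+ * MS-DOS read-only bit nor the directory flag set.
+ */
+ private static long exampleUnixModeAttributes() {
+ ZipArchiveEntry entry = new ZipArchiveEntry("example.txt");
+ entry.setUnixMode(0644);
+ return entry.getExternalAttributes(); // 0644 << 16 == 0x1A40000
+ }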
+
+ /**
+ * Unix permission.
+ * @return the unix permissions
+ */
+ public int getUnixMode() {
+ return platform != PLATFORM_UNIX ? 0 :
+ (int) ((getExternalAttributes() >> SHORT_SHIFT) & SHORT_MASK);
+ }
+
+ /**
+ * Returns true if this entry represents a unix symlink,
+ * in which case the entry's content contains the target path
+ * for the symlink.
+ *
+ * @since 1.5
+ * @return true if the entry represents a unix symlink, false otherwise.
+ */
+ public boolean isUnixSymlink() {
+ return (getUnixMode() & UnixStat.LINK_FLAG) == UnixStat.LINK_FLAG;
+ }
+
+ /**
+ * Platform specification to put into the "version made
+ * by" part of the central file header.
+ *
+ * @return PLATFORM_FAT unless {@link #setUnixMode setUnixMode}
+ * has been called, in which case PLATFORM_UNIX will be returned.
+ */
+ public int getPlatform() {
+ return platform;
+ }
+
+ /**
+ * Set the platform (UNIX or FAT).
+ * @param platform an int value - 0 is FAT, 3 is UNIX
+ */
+ protected void setPlatform(int platform) {
+ this.platform = platform;
+ }
+
+ /**
+ * Replaces all currently attached extra fields with the new array.
+ * @param fields an array of extra fields
+ */
+ public void setExtraFields(ZipExtraField[] fields) {
+ extraFields = new LinkedHashMap<ZipShort, ZipExtraField>();
+ for (ZipExtraField field : fields) {
+ if (field instanceof UnparseableExtraFieldData) {
+ unparseableExtra = (UnparseableExtraFieldData) field;
+ } else {
+ extraFields.put(field.getHeaderId(), field);
+ }
+ }
+ setExtra();
+ }
+
+ /**
+ * Retrieves all extra fields that have been parsed successfully.
+ * @return an array of the extra fields
+ */
+ public ZipExtraField[] getExtraFields() {
+ return getExtraFields(false);
+ }
+
+ /**
+ * Retrieves extra fields.
+ * @param includeUnparseable whether to also return unparseable
+ * extra fields as {@link UnparseableExtraFieldData} if such data
+ * exists.
+ * @return an array of the extra fields
+ *
+ * @since 1.1
+ */
+ public ZipExtraField[] getExtraFields(boolean includeUnparseable) {
+ if (extraFields == null) {
+ return !includeUnparseable || unparseableExtra == null
+ ? new ZipExtraField[0]
+ : new ZipExtraField[] { unparseableExtra };
+ }
+ List<ZipExtraField> result =
+ new ArrayList<ZipExtraField>(extraFields.values());
+ if (includeUnparseable && unparseableExtra != null) {
+ result.add(unparseableExtra);
+ }
+ return result.toArray(new ZipExtraField[0]);
+ }
+
+ /**
+ * Adds an extra field - replacing an already present extra field
+ * of the same type.
+ *
+ * If no extra field of the same type exists, the field will be
+ * added as last field.
+ * @param ze an extra field
+ */
+ public void addExtraField(ZipExtraField ze) {
+ if (ze instanceof UnparseableExtraFieldData) {
+ unparseableExtra = (UnparseableExtraFieldData) ze;
+ } else {
+ if (extraFields == null) {
+ extraFields = new LinkedHashMap<ZipShort, ZipExtraField>();
+ }
+ extraFields.put(ze.getHeaderId(), ze);
+ }
+ setExtra();
+ }
+
+ /**
+ * Adds an extra field - replacing an already present extra field
+ * of the same type.
+ *
+ * The new extra field will be the first one.
+ * @param ze an extra field
+ */
+ public void addAsFirstExtraField(ZipExtraField ze) {
+ if (ze instanceof UnparseableExtraFieldData) {
+ unparseableExtra = (UnparseableExtraFieldData) ze;
+ } else {
+ LinkedHashMap<ZipShort, ZipExtraField> copy = extraFields;
+ extraFields = new LinkedHashMap<ZipShort, ZipExtraField>();
+ extraFields.put(ze.getHeaderId(), ze);
+ if (copy != null) {
+ copy.remove(ze.getHeaderId());
+ extraFields.putAll(copy);
+ }
+ }
+ setExtra();
+ }
+
+ /**
+ * Remove an extra field.
+ * @param type the type of extra field to remove
+ */
+ public void removeExtraField(ZipShort type) {
+ if (extraFields == null) {
+ throw new java.util.NoSuchElementException();
+ }
+ if (extraFields.remove(type) == null) {
+ throw new java.util.NoSuchElementException();
+ }
+ setExtra();
+ }
+
+ /**
+ * Removes unparseable extra field data.
+ *
+ * @since 1.1
+ */
+ public void removeUnparseableExtraFieldData() {
+ if (unparseableExtra == null) {
+ throw new java.util.NoSuchElementException();
+ }
+ unparseableExtra = null;
+ setExtra();
+ }
+
+ /**
+ * Looks up an extra field by its header id.
+ *
+ * @return null if no such field exists.
+ */
+ public ZipExtraField getExtraField(ZipShort type) {
+ if (extraFields != null) {
+ return extraFields.get(type);
+ }
+ return null;
+ }
+
+ /**
+ * Looks up extra field data that couldn't be parsed correctly.
+ *
+ * @return null if no such field exists.
+ *
+ * @since 1.1
+ */
+ public UnparseableExtraFieldData getUnparseableExtraFieldData() {
+ return unparseableExtra;
+ }
+
+ /**
+ * Parses the given bytes as extra field data and consumes any
+ * unparseable data as an {@link UnparseableExtraFieldData}
+ * instance.
+ * @param extra an array of bytes to be parsed into extra fields
+ * @throws RuntimeException if the bytes cannot be parsed
+ */
+ @Override
+ public void setExtra(byte[] extra) throws RuntimeException {
+ try {
+ ZipExtraField[] local =
+ ExtraFieldUtils.parse(extra, true,
+ ExtraFieldUtils.UnparseableExtraField.READ);
+ mergeExtraFields(local, true);
+ } catch (ZipException e) {
+ // actually this is not possible as of Commons Compress 1.1
+ throw new RuntimeException("Error parsing extra fields for entry: "
+ + getName() + " - " + e.getMessage(), e);
+ }
+ }
+
+ /**
+ * Unfortunately {@link java.util.zip.ZipOutputStream
+ * java.util.zip.ZipOutputStream} seems to access the extra data
+ * directly, so overriding getExtra doesn't help - we need to
+ * modify super's data directly.
+ */
+ protected void setExtra() {
+ super.setExtra(ExtraFieldUtils.mergeLocalFileDataData(getExtraFields(true)));
+ }
+
+ /**
+ * Sets the central directory part of extra fields.
+ */
+ public void setCentralDirectoryExtra(byte[] b) {
+ try {
+ ZipExtraField[] central =
+ ExtraFieldUtils.parse(b, false,
+ ExtraFieldUtils.UnparseableExtraField.READ);
+ mergeExtraFields(central, false);
+ } catch (ZipException e) {
+ throw new RuntimeException(e.getMessage(), e);
+ }
+ }
+
+ /**
+ * Retrieves the extra data for the local file data.
+ * @return the extra data for local file
+ */
+ public byte[] getLocalFileDataExtra() {
+ byte[] extra = getExtra();
+ return extra != null ? extra : EMPTY;
+ }
+
+ /**
+ * Retrieves the extra data for the central directory.
+ * @return the central directory extra data
+ */
+ public byte[] getCentralDirectoryExtra() {
+ return ExtraFieldUtils.mergeCentralDirectoryData(getExtraFields(true));
+ }
+
+ /**
+ * Get the name of the entry.
+ * @return the entry name
+ */
+ @Override
+ public String getName() {
+ return name == null ? super.getName() : name;
+ }
+
+ /**
+ * Is this entry a directory?
+ * @return true if the entry is a directory
+ */
+ @Override
+ public boolean isDirectory() {
+ return getName().endsWith("/");
+ }
+
+ /**
+ * Set the name of the entry.
+ * @param name the name to use
+ */
+ protected void setName(String name) {
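+ // entry names created on Windows may use '\' as the separator;
+ // normalize to '/' on FAT, but only if no '/' is present already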
+ if (name != null && getPlatform() == PLATFORM_FAT
+ && name.indexOf("/") == -1) {
+ name = name.replace('\\', '/');
+ }
+ this.name = name;
+ }
+
+ /**
+ * Gets the uncompressed size of the entry data.
+ * @return the entry size
+ */
+ @Override
+ public long getSize() {
+ return size;
+ }
+
+ /**
+ * Sets the uncompressed size of the entry data.
+ * @param size the uncompressed size in bytes
+ * @exception IllegalArgumentException if the specified size is less
+ * than 0
+ */
+ @Override
+ public void setSize(long size) {
+ if (size < 0) {
+ throw new IllegalArgumentException("invalid entry size");
+ }
+ this.size = size;
+ }
+
+ /**
+ * Sets the name using the raw bytes and the string created from
+ * it by guessing or using the configured encoding.
+ * @param name the name to use created from the raw bytes using
+ * the guessed or configured encoding
+ * @param rawName the bytes originally read as name from the
+ * archive
+ * @since 1.2
+ */
+ protected void setName(String name, byte[] rawName) {
+ setName(name);
+ this.rawName = rawName;
+ }
+
+ /**
+ * Returns the raw bytes that made up the name before it has been
+ * converted using the configured or guessed encoding.
+ *
+ * This method will return null if this instance has not been
+ * read from an archive.
+ *
+ * @since 1.2
+ */
+ public byte[] getRawName() {
+ if (rawName != null) {
+ byte[] b = new byte[rawName.length];
+ System.arraycopy(rawName, 0, b, 0, rawName.length);
+ return b;
+ }
+ return null;
+ }
+
+ /**
+ * Get the hashCode of the entry.
+ * This uses the name as the hashcode.
+ * @return a hashcode.
+ */
+ @Override
+ public int hashCode() {
+ // this method has severe consequences on performance. We cannot rely
+ // on the super.hashCode() method since super.getName() always returns
+ // the empty string in the current implementation (there's no setter),
+ // so relying on it would drain the performance of any hashmap lookup
+ return getName().hashCode();
+ }
+
+ /**
+ * The "general purpose bit" field.
+ * @since 1.1
+ */
+ public GeneralPurposeBit getGeneralPurposeBit() {
+ return gpb;
+ }
+
+ /**
+ * The "general purpose bit" field.
+ * @since 1.1
+ */
+ public void setGeneralPurposeBit(GeneralPurposeBit b) {
+ gpb = b;
+ }
+
+ /**
+ * If there are no extra fields, use the given fields as new extra
+ * data - otherwise merge the fields assuming the existing fields
+ * and the new fields stem from different locations inside the
+ * archive.
+ * @param f the extra fields to merge
+ * @param local whether the new fields originate from local data
+ */
+ private void mergeExtraFields(ZipExtraField[] f, boolean local)
+ throws ZipException {
+ if (extraFields == null) {
+ setExtraFields(f);
+ } else {
+ for (ZipExtraField element : f) {
+ ZipExtraField existing;
+ if (element instanceof UnparseableExtraFieldData) {
+ existing = unparseableExtra;
+ } else {
+ existing = getExtraField(element.getHeaderId());
+ }
+ if (existing == null) {
+ addExtraField(element);
+ } else {
+ if (local) {
+ byte[] b = element.getLocalFileDataData();
+ existing.parseFromLocalFileData(b, 0, b.length);
+ } else {
+ byte[] b = element.getCentralDirectoryData();
+ existing.parseFromCentralDirectoryData(b, 0, b.length);
+ }
+ }
+ }
+ setExtra();
+ }
+ }
+
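+ /**
+ * The entry's last modified time as a Date, derived from the DOS
+ * date/time via {@link #getTime}.
+ * @return the last modified date of the entry
+ */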
+ public Date getLastModifiedDate() {
+ return new Date(getTime());
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ ZipArchiveEntry other = (ZipArchiveEntry) obj;
+ String myName = getName();
+ String otherName = other.getName();
+ if (myName == null) {
+ if (otherName != null) {
+ return false;
+ }
+ } else if (!myName.equals(otherName)) {
+ return false;
+ }
+ String myComment = getComment();
+ String otherComment = other.getComment();
+ if (myComment == null) {
+ myComment = "";
+ }
+ if (otherComment == null) {
+ otherComment = "";
+ }
+ return getTime() == other.getTime()
+ && myComment.equals(otherComment)
+ && getInternalAttributes() == other.getInternalAttributes()
+ && getPlatform() == other.getPlatform()
+ && getExternalAttributes() == other.getExternalAttributes()
+ && getMethod() == other.getMethod()
+ && getSize() == other.getSize()
+ && getCrc() == other.getCrc()
+ && getCompressedSize() == other.getCompressedSize()
+ && Arrays.equals(getCentralDirectoryExtra(),
+ other.getCentralDirectoryExtra())
+ && Arrays.equals(getLocalFileDataExtra(),
+ other.getLocalFileDataExtra())
+ && gpb.equals(other.gpb);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveInputStream.java
new file mode 100644
index 000000000..202eeb691
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveInputStream.java
@@ -0,0 +1,1072 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PushbackInputStream;
+import java.nio.ByteBuffer;
+import java.util.zip.CRC32;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipException;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
+
+/**
+ * Implements an input stream that can read Zip archives.
+ *
+ * Note that {@link ZipArchiveEntry#getSize()} may return -1 if the
+ * DEFLATE algorithm is used, as the size information is not available
+ * from the header.
+ *
+ * The {@link ZipFile} class is preferred when reading from files.
+ *
+ * As of Apache Commons Compress 1.3 it transparently supports Zip64
+ * extensions and thus individual entries and archives larger than 4
+ * GB or with more than 65536 entries.
+ *
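+ * A minimal read loop, as an illustrative sketch (the stream
+ * construction and "archive.zip" name are assumptions):
+ *
+ *   ZipArchiveInputStream zin =
+ *       new ZipArchiveInputStream(new FileInputStream("archive.zip"));
+ *   ZipArchiveEntry entry;
+ *   while ((entry = zin.getNextZipEntry()) != null) {
+ *       // consume the entry's data via zin.read(...) before moving on
+ *   }
+ *   zin.close();
+ *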
+ * @see ZipFile
+ * @NotThreadSafe
+ */
+public class ZipArchiveInputStream extends ArchiveInputStream {
+
+ /** The zip encoding to use for filenames and the file comment. */
+ private final ZipEncoding zipEncoding;
+
+ /** Whether to look for and use Unicode extra fields. */
+ private final boolean useUnicodeExtraFields;
+
+ /** Wrapped stream, will always be a PushbackInputStream. */
+ private final InputStream in;
+
+ /** Inflater used for all deflated entries. */
+ private final Inflater inf = new Inflater(true);
+
+ /** Buffer used to read from the wrapped stream. */
+ private final ByteBuffer buf = ByteBuffer.allocate(ZipArchiveOutputStream.BUFFER_SIZE);
+
+ /** The entry that is currently being read. */
+ private CurrentEntry current = null;
+
+ /** Whether the stream has been closed. */
+ private boolean closed = false;
+
+ /** Whether the stream has reached the central directory - and thus found all entries. */
+ private boolean hitCentralDirectory = false;
+
+ /**
+ * When reading a stored entry that uses the data descriptor this
+ * stream has to read the full entry and caches it. This is the
+ * cache.
+ */
+ private ByteArrayInputStream lastStoredEntry = null;
+
+ /** Whether the stream will try to read STORED entries that use a data descriptor. */
+ private boolean allowStoredEntriesWithDataDescriptor = false;
+
+ private static final int LFH_LEN = 30;
+ /*
+ local file header signature WORD
+ version needed to extract SHORT
+ general purpose bit flag SHORT
+ compression method SHORT
+ last mod file time SHORT
+ last mod file date SHORT
+ crc-32 WORD
+ compressed size WORD
+ uncompressed size WORD
+ file name length SHORT
+ extra field length SHORT
+ */
+
+ private static final int CFH_LEN = 46;
+ /*
+ central file header signature WORD
+ version made by SHORT
+ version needed to extract SHORT
+ general purpose bit flag SHORT
+ compression method SHORT
+ last mod file time SHORT
+ last mod file date SHORT
+ crc-32 WORD
+ compressed size WORD
+ uncompressed size WORD
+ file name length SHORT
+ extra field length SHORT
+ file comment length SHORT
+ disk number start SHORT
+ internal file attributes SHORT
+ external file attributes WORD
+ relative offset of local header WORD
+ */
+
+ private static final long TWO_EXP_32 = ZIP64_MAGIC + 1;
+
+ // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
+ private final byte[] LFH_BUF = new byte[LFH_LEN];
+ private final byte[] SKIP_BUF = new byte[1024];
+ private final byte[] SHORT_BUF = new byte[SHORT];
+ private final byte[] WORD_BUF = new byte[WORD];
+ private final byte[] TWO_DWORD_BUF = new byte[2 * DWORD];
+
+ private int entriesRead = 0;
+
+ public ZipArchiveInputStream(InputStream inputStream) {
+ this(inputStream, ZipEncodingHelper.UTF8);
+ }
+
+ /**
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ * @since 1.5
+ */
+ public ZipArchiveInputStream(InputStream inputStream, String encoding) {
+ this(inputStream, encoding, true);
+ }
+
+ /**
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ * @param useUnicodeExtraFields whether to use InfoZIP Unicode
+ * Extra Fields (if present) to set the file names.
+ */
+ public ZipArchiveInputStream(InputStream inputStream, String encoding, boolean useUnicodeExtraFields) {
+ this(inputStream, encoding, useUnicodeExtraFields, false);
+ }
+
+ /**
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ * @param useUnicodeExtraFields whether to use InfoZIP Unicode
+ * Extra Fields (if present) to set the file names.
+ * @param allowStoredEntriesWithDataDescriptor whether the stream
+ * will try to read STORED entries that use a data descriptor
+ * @since 1.1
+ */
+ public ZipArchiveInputStream(InputStream inputStream,
+ String encoding,
+ boolean useUnicodeExtraFields,
+ boolean allowStoredEntriesWithDataDescriptor) {
+ zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
+ this.useUnicodeExtraFields = useUnicodeExtraFields;
+ in = new PushbackInputStream(inputStream, buf.capacity());
+ this.allowStoredEntriesWithDataDescriptor =
+ allowStoredEntriesWithDataDescriptor;
+ }
+
+ public ZipArchiveEntry getNextZipEntry() throws IOException {
+ boolean firstEntry = true;
+ if (closed || hitCentralDirectory) {
+ return null;
+ }
+ if (current != null) {
+ closeEntry();
+ firstEntry = false;
+ }
+
+ try {
+ if (firstEntry) {
+ // split archives have a special signature before the
+ // first local file header - look for it and fail with
+ // the appropriate error message if this is a split
+ // archive.
+ readFirstLocalFileHeader(LFH_BUF);
+ } else {
+ readFully(LFH_BUF);
+ }
+ } catch (EOFException e) {
+ return null;
+ }
+
+ ZipLong sig = new ZipLong(LFH_BUF);
+ if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG)) {
+ hitCentralDirectory = true;
+ skipRemainderOfArchive();
+ }
+ if (!sig.equals(ZipLong.LFH_SIG)) {
+ return null;
+ }
+
+ int off = WORD;
+ current = new CurrentEntry();
+
+ int versionMadeBy = ZipShort.getValue(LFH_BUF, off);
+ off += SHORT;
+ current.entry.setPlatform((versionMadeBy >> ZipFile.BYTE_SHIFT) & ZipFile.NIBLET_MASK);
+
+ final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(LFH_BUF, off);
+ final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames();
+ final ZipEncoding entryEncoding = hasUTF8Flag ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
+ current.hasDataDescriptor = gpFlag.usesDataDescriptor();
+ current.entry.setGeneralPurposeBit(gpFlag);
+
+ off += SHORT;
+
+ current.entry.setMethod(ZipShort.getValue(LFH_BUF, off));
+ off += SHORT;
+
+ long time = ZipUtil.dosToJavaTime(ZipLong.getValue(LFH_BUF, off));
+ current.entry.setTime(time);
+ off += WORD;
+
+ ZipLong size = null, cSize = null;
+ if (!current.hasDataDescriptor) {
+ current.entry.setCrc(ZipLong.getValue(LFH_BUF, off));
+ off += WORD;
+
+ cSize = new ZipLong(LFH_BUF, off);
+ off += WORD;
+
+ size = new ZipLong(LFH_BUF, off);
+ off += WORD;
+ } else {
+ off += 3 * WORD;
+ }
+
+ int fileNameLen = ZipShort.getValue(LFH_BUF, off);
+
+ off += SHORT;
+
+ int extraLen = ZipShort.getValue(LFH_BUF, off);
+ off += SHORT;
+
+ byte[] fileName = new byte[fileNameLen];
+ readFully(fileName);
+ current.entry.setName(entryEncoding.decode(fileName), fileName);
+
+ byte[] extraData = new byte[extraLen];
+ readFully(extraData);
+ current.entry.setExtra(extraData);
+
+ if (!hasUTF8Flag && useUnicodeExtraFields) {
+ ZipUtil.setNameAndCommentFromExtraFields(current.entry, fileName, null);
+ }
+
+ processZip64Extra(size, cSize);
+
+ if (current.entry.getCompressedSize() != -1) {
+ if (current.entry.getMethod() == ZipMethod.UNSHRINKING.getCode()) {
+ current.in = new UnshrinkingInputStream(new BoundedInputStream(in, current.entry.getCompressedSize()));
+ } else if (current.entry.getMethod() == ZipMethod.IMPLODING.getCode()) {
+ current.in = new ExplodingInputStream(
+ current.entry.getGeneralPurposeBit().getSlidingDictionarySize(),
+ current.entry.getGeneralPurposeBit().getNumberOfShannonFanoTrees(),
+ new BoundedInputStream(in, current.entry.getCompressedSize()));
+ }
+ }
+
+ entriesRead++;
+ return current.entry;
+ }
+
+ /**
+ * Fills the given array with the first local file header and
+ * deals with splitting/spanning markers that may prefix the first
+ * LFH.
+ */
+ private void readFirstLocalFileHeader(byte[] lfh) throws IOException {
+ readFully(lfh);
+ ZipLong sig = new ZipLong(lfh);
+ if (sig.equals(ZipLong.DD_SIG)) {
+ throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.SPLITTING);
+ }
+
+ if (sig.equals(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER)) {
+ // The archive is not really split as only one segment was
+ // needed in the end. Just skip over the marker.
+ byte[] missedLfhBytes = new byte[4];
+ readFully(missedLfhBytes);
+ System.arraycopy(lfh, 4, lfh, 0, LFH_LEN - 4);
+ System.arraycopy(missedLfhBytes, 0, lfh, LFH_LEN - 4, 4);
+ }
+ }
+
+ /**
+ * Records whether a Zip64 extra is present and sets the size
+ * information from it if sizes are 0xFFFFFFFF and the entry
+ * doesn't use a data descriptor.
+ */
+ private void processZip64Extra(ZipLong size, ZipLong cSize) {
+ Zip64ExtendedInformationExtraField z64 =
+ (Zip64ExtendedInformationExtraField)
+ current.entry.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
+ current.usesZip64 = z64 != null;
+ if (!current.hasDataDescriptor) {
+ if (z64 != null // same as current.usesZip64 but avoids NPE warning
+ && (cSize.equals(ZipLong.ZIP64_MAGIC) || size.equals(ZipLong.ZIP64_MAGIC)) ) {
+ current.entry.setCompressedSize(z64.getCompressedSize().getLongValue());
+ current.entry.setSize(z64.getSize().getLongValue());
+ } else {
+ current.entry.setCompressedSize(cSize.getValue());
+ current.entry.setSize(size.getValue());
+ }
+ }
+ }
+
+ @Override
+ public ArchiveEntry getNextEntry() throws IOException {
+ return getNextZipEntry();
+ }
+
+ /**
+ * Whether this class is able to read the given entry.
+ *
+ * May return false if it is set up to use encryption or a
+ * compression method that hasn't been implemented yet.
+ * @since 1.1
+ */
+ @Override
+ public boolean canReadEntryData(ArchiveEntry ae) {
+ if (ae instanceof ZipArchiveEntry) {
+ ZipArchiveEntry ze = (ZipArchiveEntry) ae;
+ return ZipUtil.canHandleEntryData(ze)
+ && supportsDataDescriptorFor(ze);
+
+ }
+ return false;
+ }
+
+ @Override
+ public int read(byte[] buffer, int offset, int length) throws IOException {
+ if (closed) {
+ throw new IOException("The stream is closed");
+ }
+
+ if (current == null) {
+ return -1;
+ }
+
+ // avoid int overflow, check null buffer
+ if (offset > buffer.length || length < 0 || offset < 0 || buffer.length - offset < length) {
+ throw new ArrayIndexOutOfBoundsException();
+ }
+
+ ZipUtil.checkRequestedFeatures(current.entry);
+ if (!supportsDataDescriptorFor(current.entry)) {
+ throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.DATA_DESCRIPTOR,
+ current.entry);
+ }
+
+ int read;
+ if (current.entry.getMethod() == ZipArchiveOutputStream.STORED) {
+ read = readStored(buffer, offset, length);
+ } else if (current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED) {
+ read = readDeflated(buffer, offset, length);
+ } else if (current.entry.getMethod() == ZipMethod.UNSHRINKING.getCode()
+ || current.entry.getMethod() == ZipMethod.IMPLODING.getCode()) {
+ read = current.in.read(buffer, offset, length);
+ } else {
+ throw new UnsupportedZipFeatureException(ZipMethod.getMethodByCode(current.entry.getMethod()),
+ current.entry);
+ }
+
+ if (read >= 0) {
+ current.crc.update(buffer, offset, read);
+ }
+
+ return read;
+ }
+
+ /**
+ * Implementation of read for STORED entries.
+ */
+ private int readStored(byte[] buffer, int offset, int length) throws IOException {
+
+ if (current.hasDataDescriptor) {
+ if (lastStoredEntry == null) {
+ readStoredEntry();
+ }
+ return lastStoredEntry.read(buffer, offset, length);
+ }
+
+ long csize = current.entry.getSize();
+ if (current.bytesRead >= csize) {
+ return -1;
+ }
+
+ if (buf.position() >= buf.limit()) {
+ buf.position(0);
+ int l = in.read(buf.array());
+ if (l == -1) {
+ return -1;
+ }
+ buf.limit(l);
+
+ count(buf.limit());
+ current.bytesReadFromStream += buf.limit();
+ }
+
+ int toRead = Math.min(buf.remaining(), length);
+ if ((csize - current.bytesRead) < toRead) {
+ // if it is smaller than toRead then it fits into an int
+ toRead = (int) (csize - current.bytesRead);
+ }
+ buf.get(buffer, offset, toRead);
+ current.bytesRead += toRead;
+ return toRead;
+ }
+
+ /**
+ * Implementation of read for DEFLATED entries.
+ */
+ private int readDeflated(byte[] buffer, int offset, int length) throws IOException {
+ int read = readFromInflater(buffer, offset, length);
+ if (read <= 0) {
+ if (inf.finished()) {
+ return -1;
+ } else if (inf.needsDictionary()) {
+ throw new ZipException("This archive needs a preset dictionary"
+ + " which is not supported by Commons"
+ + " Compress.");
+ } else if (read == -1) {
+ throw new IOException("Truncated ZIP file");
+ }
+ }
+ return read;
+ }
+
+ /**
+ * Potentially reads more bytes to fill the inflater's buffer and
+ * reads from it.
+ */
+ private int readFromInflater(byte[] buffer, int offset, int length) throws IOException {
+ int read = 0;
+ do {
+ if (inf.needsInput()) {
+ int l = fill();
+ if (l > 0) {
+ current.bytesReadFromStream += buf.limit();
+ } else if (l == -1) {
+ return -1;
+ } else {
+ break;
+ }
+ }
+ try {
+ read = inf.inflate(buffer, offset, length);
+ } catch (DataFormatException e) {
+ throw (IOException) new ZipException(e.getMessage()).initCause(e);
+ }
+ } while (read == 0 && inf.needsInput());
+ return read;
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!closed) {
+ closed = true;
+ in.close();
+ inf.end();
+ }
+ }
+
+ /**
+ * Skips over and discards value bytes of data from this input
+ * stream.
+ *
+ * This implementation may end up skipping over some smaller
+ * number of bytes, possibly 0, if and only if it reaches the end
+ * of the underlying stream.
+ *
+ * The actual number of bytes skipped is returned.
+ *
+ * @param value the number of bytes to be skipped.
+ * @return the actual number of bytes skipped.
+ * @throws IOException - if an I/O error occurs.
+ * @throws IllegalArgumentException - if value is negative.
+ */
+ @Override
+ public long skip(long value) throws IOException {
+ if (value >= 0) {
+ long skipped = 0;
+ while (skipped < value) {
+ long rem = value - skipped;
+ int x = read(SKIP_BUF, 0, (int) (SKIP_BUF.length > rem ? rem : SKIP_BUF.length));
+ if (x == -1) {
+ return skipped;
+ }
+ skipped += x;
+ }
+ return skipped;
+ }
+ throw new IllegalArgumentException();
+ }
+
+ /**
+ * Checks if the signature matches what is expected for a zip file.
+ * Does not currently handle self-extracting zips which may have arbitrary
+ * leading content.
+ *
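+ * For instance, the four bytes {@code 0x50 0x4B 0x03 0x04} (the
+ * local file header signature, "PK" followed by 0x03 and 0x04)
+ * make this method return true.
+ *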
+ * @param signature the bytes to check
+ * @param length the number of bytes to check
+ * @return true, if this stream is a zip archive stream, false otherwise
+ */
+ public static boolean matches(byte[] signature, int length) {
+ if (length < ZipArchiveOutputStream.LFH_SIG.length) {
+ return false;
+ }
+
+ return checksig(signature, ZipArchiveOutputStream.LFH_SIG) // normal file
+ || checksig(signature, ZipArchiveOutputStream.EOCD_SIG) // empty zip
+ || checksig(signature, ZipArchiveOutputStream.DD_SIG) // split zip
+ || checksig(signature, ZipLong.SINGLE_SEGMENT_SPLIT_MARKER.getBytes());
+ }
+
+ private static boolean checksig(byte[] signature, byte[] expected) {
+ for (int i = 0; i < expected.length; i++) {
+ if (signature[i] != expected[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Closes the current ZIP archive entry and positions the underlying
+ * stream to the beginning of the next entry. All per-entry variables
+ * and data structures are cleared.
+ *
+ * If the compressed size of this entry is included in the entry header,
+ * then any outstanding bytes are simply skipped from the underlying
+ * stream without uncompressing them. This allows an entry to be safely
+ * closed even if the compression method is unsupported.
+ *
+ * In case we don't know the compressed size of this entry or have
+ * already buffered too much data from the underlying stream to support
+ * uncompression, then the uncompression process is completed and the
+ * end position of the stream is adjusted based on the result of that
+ * process.
+ *
+ * @throws IOException if an error occurs
+ */
+ private void closeEntry() throws IOException {
+ if (closed) {
+ throw new IOException("The stream is closed");
+ }
+ if (current == null) {
+ return;
+ }
+
+ // Ensure all entry bytes are read
+ if (current.bytesReadFromStream <= current.entry.getCompressedSize()
+ && !current.hasDataDescriptor) {
+ drainCurrentEntryData();
+ } else {
+ skip(Long.MAX_VALUE);
+
+ long inB = current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED
+ ? getBytesInflated() : current.bytesRead;
+
+ // this is at most a single read() operation and can't
+ // exceed the range of int
+ int diff = (int) (current.bytesReadFromStream - inB);
+
+ // Pushback any required bytes
+ if (diff > 0) {
+ pushback(buf.array(), buf.limit() - diff, diff);
+ }
+ }
+
+ if (lastStoredEntry == null && current.hasDataDescriptor) {
+ readDataDescriptor();
+ }
+
+ inf.reset();
+ buf.clear().flip();
+ current = null;
+ lastStoredEntry = null;
+ }
+
+ /**
+ * Read all data of the current entry from the underlying stream
+ * that hasn't been read, yet.
+ */
+ private void drainCurrentEntryData() throws IOException {
+ long remaining = current.entry.getCompressedSize() - current.bytesReadFromStream;
+ while (remaining > 0) {
+ long n = in.read(buf.array(), 0, (int) Math.min(buf.capacity(), remaining));
+ if (n < 0) {
+ throw new EOFException("Truncated ZIP entry: " + current.entry.getName());
+ } else {
+ count(n);
+ remaining -= n;
+ }
+ }
+ }
+
+ /**
+ * Get the number of bytes Inflater has actually processed.
+ *
+ * For Java < Java7 the getBytes* methods in
+ * Inflater/Deflater seem to return unsigned ints rather than
+ * longs that start over with 0 at 2^32.
+ *
+ * The stream knows how many bytes it has read, but not how
+ * many the Inflater actually consumed - it should be between the
+ * total number of bytes read for the entry and the total number
+ * minus the last read operation. Here we just try to make the
+ * value close enough to the bytes we've read by assuming the
+ * number of bytes consumed must be smaller than (or equal to) the
+ * number of bytes read but not smaller by more than 2^32.
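+ *
+ * Worked example (illustrative): if 5 GiB have been read from the
+ * stream but the Inflater's wrapped counter reports 1 GiB, the loop
+ * below adds 2^32 bytes once, yielding 5 GiB.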
+ */
+ private long getBytesInflated() {
+ long inB = inf.getBytesRead();
+ if (current.bytesReadFromStream >= TWO_EXP_32) {
+ while (inB + TWO_EXP_32 <= current.bytesReadFromStream) {
+ inB += TWO_EXP_32;
+ }
+ }
+ return inB;
+ }
+
+ private int fill() throws IOException {
+ if (closed) {
+ throw new IOException("The stream is closed");
+ }
+ int length = in.read(buf.array());
+ if (length > 0) {
+ buf.limit(length);
+ count(buf.limit());
+ inf.setInput(buf.array(), 0, buf.limit());
+ }
+ return length;
+ }
+
+ private void readFully(byte[] b) throws IOException {
+ int count = IOUtils.readFully(in, b);
+ count(count);
+ if (count < b.length) {
+ throw new EOFException();
+ }
+ }
+
+ private void readDataDescriptor() throws IOException {
+ readFully(WORD_BUF);
+ ZipLong val = new ZipLong(WORD_BUF);
+ if (ZipLong.DD_SIG.equals(val)) {
+ // data descriptor with signature, skip sig
+ readFully(WORD_BUF);
+ val = new ZipLong(WORD_BUF);
+ }
+ current.entry.setCrc(val.getValue());
+
+ // if there is a ZIP64 extra field, sizes are eight bytes
+ // each, otherwise four bytes each. Unfortunately some
+ // implementations - namely Java7 - use eight bytes without
+ // using a ZIP64 extra field -
+ // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588
+
+ // just read 16 bytes and check whether bytes nine to twelve
+ // look like one of the signatures of what could follow a data
+ // descriptor (ignoring archive decryption headers for now).
+ // If so, push back eight bytes and assume sizes are four
+ // bytes, otherwise sizes are eight bytes each.
+ readFully(TWO_DWORD_BUF);
+ ZipLong potentialSig = new ZipLong(TWO_DWORD_BUF, DWORD);
+ if (potentialSig.equals(ZipLong.CFH_SIG) || potentialSig.equals(ZipLong.LFH_SIG)) {
+ pushback(TWO_DWORD_BUF, DWORD, DWORD);
+ current.entry.setCompressedSize(ZipLong.getValue(TWO_DWORD_BUF));
+ current.entry.setSize(ZipLong.getValue(TWO_DWORD_BUF, WORD));
+ } else {
+ current.entry.setCompressedSize(ZipEightByteInteger.getLongValue(TWO_DWORD_BUF));
+ current.entry.setSize(ZipEightByteInteger.getLongValue(TWO_DWORD_BUF, DWORD));
+ }
+ }
+
+ /**
+ * Whether this entry requires a data descriptor this library can work with.
+ *
+ * @return true if allowStoredEntriesWithDataDescriptor is true,
+ * the entry doesn't require any data descriptor or the method is
+ * DEFLATED.
+ */
+ private boolean supportsDataDescriptorFor(ZipArchiveEntry entry) {
+ return !entry.getGeneralPurposeBit().usesDataDescriptor()
+ || (allowStoredEntriesWithDataDescriptor && entry.getMethod() == ZipEntry.STORED)
+ || entry.getMethod() == ZipEntry.DEFLATED;
+ }
+
+ /**
+ * Caches a stored entry that uses the data descriptor.
+ *
+ * - Reads a stored entry until the signature of a local file
+ *   header, central directory header or data descriptor has been
+ *   found.
+ * - Stores all entry data in lastStoredEntry.
+ * - Rewinds the stream to position at the data descriptor.
+ * - Reads the data descriptor.
+ *
+ * After calling this method the entry should know its size,
+ * the entry's data is cached and the stream is positioned at the
+ * next local file or central directory header.
+ */
+ private void readStoredEntry() throws IOException {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ int off = 0;
+ boolean done = false;
+
+ // length of DD without signature
+ int ddLen = current.usesZip64 ? WORD + 2 * DWORD : 3 * WORD;
+
+ while (!done) {
+ int r = in.read(buf.array(), off, ZipArchiveOutputStream.BUFFER_SIZE - off);
+ if (r <= 0) {
+ // read the whole archive without ever finding a
+ // central directory
+ throw new IOException("Truncated ZIP file");
+ }
+ if (r + off < 4) {
+ // buffer too small to check for a signature, loop
+ off += r;
+ continue;
+ }
+
+ done = bufferContainsSignature(bos, off, r, ddLen);
+ if (!done) {
+ off = cacheBytesRead(bos, off, r, ddLen);
+ }
+ }
+
+ byte[] b = bos.toByteArray();
+ lastStoredEntry = new ByteArrayInputStream(b);
+ }
+
+ private static final byte[] LFH = ZipLong.LFH_SIG.getBytes();
+ private static final byte[] CFH = ZipLong.CFH_SIG.getBytes();
+ private static final byte[] DD = ZipLong.DD_SIG.getBytes();
+
+ /**
+ * Checks whether the current buffer contains the signature of a
+ * "data decsriptor", "local file header" or
+ * "central directory entry".
+ *
+ * If it contains such a signature, reads the data descriptor
+ * and positions the stream right after the data descriptor.
+ */
+ private boolean bufferContainsSignature(ByteArrayOutputStream bos, int offset, int lastRead, int expectedDDLen)
+ throws IOException {
+
+ boolean done = false;
+ int readTooMuch = 0;
+ for (int i = 0; !done && i < lastRead - 4; i++) {
+ if (buf.array()[i] == LFH[0] && buf.array()[i + 1] == LFH[1]) {
+ if ((buf.array()[i + 2] == LFH[2] && buf.array()[i + 3] == LFH[3])
+ || (buf.array()[i + 2] == CFH[2] && buf.array()[i + 3] == CFH[3])) {
+ // found a LFH or CFH:
+ readTooMuch = offset + lastRead - i - expectedDDLen;
+ done = true;
+ }
+ else if (buf.array()[i + 2] == DD[2] && buf.array()[i + 3] == DD[3]) {
+ // found DD:
+ readTooMuch = offset + lastRead - i;
+ done = true;
+ }
+ if (done) {
+ // * push back bytes read in excess as well as the data
+ // descriptor
+ // * copy the remaining bytes to cache
+ // * read data descriptor
+ pushback(buf.array(), offset + lastRead - readTooMuch, readTooMuch);
+ bos.write(buf.array(), 0, i);
+ readDataDescriptor();
+ }
+ }
+ }
+ return done;
+ }
+
+ /**
+ * If the last read bytes could hold a data descriptor and an
+ * incomplete signature then save the last bytes to the front of
+ * the buffer and cache everything in front of the potential data
+ * descriptor into the given ByteArrayOutputStream.
+ *
+ * Data descriptor plus incomplete signature (3 bytes in the
+ * worst case) can be 20 bytes max.
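+ *
+ * For example, with offset 0, lastRead 512 and a non-ZIP64
+ * descriptor length of 12, the first 512 - 12 - 3 = 497 bytes are
+ * cached and the trailing 15 bytes stay in the buffer in case they
+ * start a signature.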
+ */
+ private int cacheBytesRead(ByteArrayOutputStream bos, int offset, int lastRead, int expectedDDLen) {
+ final int cacheable = offset + lastRead - expectedDDLen - 3;
+ if (cacheable > 0) {
+ bos.write(buf.array(), 0, cacheable);
+ System.arraycopy(buf.array(), cacheable, buf.array(), 0, expectedDDLen + 3);
+ offset = expectedDDLen + 3;
+ } else {
+ offset += lastRead;
+ }
+ return offset;
+ }
+
+ private void pushback(byte[] buf, int offset, int length) throws IOException {
+ ((PushbackInputStream) in).unread(buf, offset, length);
+ pushedBackBytes(length);
+ }
+
+ // End of Central Directory Record
+ // end of central dir signature WORD
+ // number of this disk SHORT
+ // number of the disk with the
+ // start of the central directory SHORT
+ // total number of entries in the
+ // central directory on this disk SHORT
+ // total number of entries in
+ // the central directory SHORT
+ // size of the central directory WORD
+ // offset of start of central
+ // directory with respect to
+ // the starting disk number WORD
+ // .ZIP file comment length SHORT
+ // .ZIP file comment up to 64KB
+ //
+
+ /**
+ * Reads the stream until it find the "End of central directory
+ * record" and consumes it as well.
+ */
+ private void skipRemainderOfArchive() throws IOException {
+ // skip over central directory. One LFH has been read too much
+ // already. The calculation discounts file names and extra
+ // data so it will be too short.
+ realSkip(entriesRead * CFH_LEN - LFH_LEN);
+ findEocdRecord();
+ realSkip(ZipFile.MIN_EOCD_SIZE - WORD /* signature */ - SHORT /* comment len */);
+ readFully(SHORT_BUF);
+ // file comment
+ realSkip(ZipShort.getValue(SHORT_BUF));
+ }
+
+ /**
+ * Reads forward until the signature of the "End of central
+ * directory" record is found.
+ */
+ private void findEocdRecord() throws IOException {
+ int currentByte = -1;
+ boolean skipReadCall = false;
+ while (skipReadCall || (currentByte = readOneByte()) > -1) {
+ skipReadCall = false;
+ if (!isFirstByteOfEocdSig(currentByte)) {
+ continue;
+ }
+ currentByte = readOneByte();
+ if (currentByte != ZipArchiveOutputStream.EOCD_SIG[1]) {
+ if (currentByte == -1) {
+ break;
+ }
+ skipReadCall = isFirstByteOfEocdSig(currentByte);
+ continue;
+ }
+ currentByte = readOneByte();
+ if (currentByte != ZipArchiveOutputStream.EOCD_SIG[2]) {
+ if (currentByte == -1) {
+ break;
+ }
+ skipReadCall = isFirstByteOfEocdSig(currentByte);
+ continue;
+ }
+ currentByte = readOneByte();
+ if (currentByte == -1
+ || currentByte == ZipArchiveOutputStream.EOCD_SIG[3]) {
+ break;
+ }
+ skipReadCall = isFirstByteOfEocdSig(currentByte);
+ }
+ }
+
+ /**
+ * Skips bytes by reading from the underlying stream rather than
+ * the (potentially inflating) archive stream - which {@link
+ * #skip} would do.
+ *
+ * Also updates bytes-read counter.
+ */
+ private void realSkip(long value) throws IOException {
+ if (value >= 0) {
+ long skipped = 0;
+ while (skipped < value) {
+ long rem = value - skipped;
+ int x = in.read(SKIP_BUF, 0, (int) (SKIP_BUF.length > rem ? rem : SKIP_BUF.length));
+ if (x == -1) {
+ return;
+ }
+ count(x);
+ skipped += x;
+ }
+ return;
+ }
+ throw new IllegalArgumentException();
+ }
+
+ /**
+ * Reads bytes by reading from the underlying stream rather than
+ * the (potentially inflating) archive stream - which {@link #read} would do.
+ *
+ * Also updates bytes-read counter.
+ */
+ private int readOneByte() throws IOException {
+ int b = in.read();
+ if (b != -1) {
+ count(1);
+ }
+ return b;
+ }
+
+ private boolean isFirstByteOfEocdSig(int b) {
+ return b == ZipArchiveOutputStream.EOCD_SIG[0];
+ }
+
+ /**
+ * Structure collecting information for the entry that is
+ * currently being read.
+ */
+ private static final class CurrentEntry {
+
+ /**
+ * Current ZIP entry.
+ */
+ private final ZipArchiveEntry entry = new ZipArchiveEntry();
+
+ /**
+ * Does the entry use a data descriptor?
+ */
+ private boolean hasDataDescriptor;
+
+ /**
+ * Does the entry have a ZIP64 extended information extra field.
+ */
+ private boolean usesZip64;
+
+ /**
+ * Number of bytes of entry content read by the client if the
+ * entry is STORED.
+ */
+ private long bytesRead;
+
+ /**
+ * Number of bytes of entry content read so far from the stream.
+ *
+ * This may be more than the actual entry's length as some
+ * stuff gets buffered up and needs to be pushed back when the
+ * end of the entry has been reached.
+ */
+ private long bytesReadFromStream;
+
+ /**
+ * The checksum calculated as the current entry is read.
+ */
+ private final CRC32 crc = new CRC32();
+
+ /**
+ * The input stream decompressing the data for shrunk and imploded entries.
+ */
+ private InputStream in;
+ }
+
+ /**
+ * Bounded input stream adapted from commons-io
+ */
+ private class BoundedInputStream extends InputStream {
+
+ /** the wrapped input stream */
+ private final InputStream in;
+
+ /** the max length to provide */
+ private final long max;
+
+ /** the number of bytes already returned */
+ private long pos = 0;
+
+ /**
+ * Creates a new BoundedInputStream that wraps the given input
+ * stream and limits it to a certain size.
+ *
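+ * E.g. {@code new BoundedInputStream(in, 100)} returns at most 100
+ * bytes of {@code in} and then signals end of stream.
+ *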
+ * @param in The wrapped input stream
+ * @param size The maximum number of bytes to return
+ */
+ public BoundedInputStream(final InputStream in, final long size) {
+ this.max = size;
+ this.in = in;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (max >= 0 && pos >= max) {
+ return -1;
+ }
+ final int result = in.read();
+ pos++;
+ count(1);
+ current.bytesReadFromStream++;
+ return result;
+ }
+
+ @Override
+ public int read(final byte[] b) throws IOException {
+ return this.read(b, 0, b.length);
+ }
+
+ @Override
+ public int read(final byte[] b, final int off, final int len) throws IOException {
+ if (max >= 0 && pos >= max) {
+ return -1;
+ }
+ final long maxRead = max >= 0 ? Math.min(len, max - pos) : len;
+ final int bytesRead = in.read(b, off, (int) maxRead);
+
+ if (bytesRead == -1) {
+ return -1;
+ }
+
+ pos += bytesRead;
+ count(bytesRead);
+ current.bytesReadFromStream += bytesRead;
+ return bytesRead;
+ }
+
+ @Override
+ public long skip(final long n) throws IOException {
+ final long toSkip = max >= 0 ? Math.min(n, max - pos) : n;
+ final long skippedBytes = in.skip(toSkip);
+ pos += skippedBytes;
+ return skippedBytes;
+ }
+
+ @Override
+ public int available() throws IOException {
+ if (max >= 0 && pos >= max) {
+ return 0;
+ }
+ return in.available();
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveOutputStream.java
new file mode 100644
index 000000000..12a1c66e7
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipArchiveOutputStream.java
@@ -0,0 +1,1501 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.zip.CRC32;
+import java.util.zip.Deflater;
+import java.util.zip.ZipException;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.INITIAL_VERSION;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MIN_VERSION;
+
+/**
+ * Reimplementation of {@link java.util.zip.ZipOutputStream
+ * java.util.zip.ZipOutputStream} that does handle the extended
+ * functionality of this package, especially internal/external file
+ * attributes and extra fields with different layouts for local file
+ * data and central directory entries.
+ *
+ * This class will try to use {@link java.io.RandomAccessFile
+ * RandomAccessFile} when you know that the output is going to go to a
+ * file.
+ *
+ * If RandomAccessFile cannot be used, this implementation will use
+ * a Data Descriptor to store size and CRC information for {@link
+ * #DEFLATED DEFLATED} entries, this means, you don't need to
+ * calculate them yourself. Unfortunately this is not possible for
+ * the {@link #STORED STORED} method, here setting the CRC and
+ * uncompressed size information is required before {@link
+ * #putArchiveEntry(ArchiveEntry)} can be called.
+ *
+ * As of Apache Commons Compress 1.3 it transparently supports Zip64
+ * extensions and thus individual entries and archives larger than 4
+ * GB or with more than 65536 entries in most cases but explicit
+ * control is provided via {@link #setUseZip64}. If the stream cannot
+ * use RandomAccessFile and you try to write a ZipArchiveEntry of
+ * unknown size then Zip64 extensions will be disabled by default.
+ *
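+ * A minimal usage sketch (the output file name is an illustrative
+ * assumption):
+ *
+ *   ZipArchiveOutputStream zos = new ZipArchiveOutputStream(new File("out.zip"));
+ *   zos.putArchiveEntry(new ZipArchiveEntry("hello.txt"));
+ *   zos.write("hello".getBytes());
+ *   zos.closeArchiveEntry();
+ *   zos.finish();
+ *   zos.close();
+ *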
+ * @NotThreadSafe
+ */
+public class ZipArchiveOutputStream extends ArchiveOutputStream {
+
+ static final int BUFFER_SIZE = 512;
+
+ /** indicates if this archive is finished. protected for use in Jar implementation */
+ protected boolean finished = false;
+
+ /*
+ * Apparently Deflater.setInput gets slowed down a lot on Sun JVMs
+ * when it gets handed a really big buffer. See
+ * https://issues.apache.org/bugzilla/show_bug.cgi?id=45396
+ *
+ * Using a buffer size of 8 kB proved to be a good compromise
+ */
+ private static final int DEFLATER_BLOCK_SIZE = 8192;
+
+ /**
+ * Compression method for deflated entries.
+ */
+ public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;
+
+ /**
+ * Default compression level for deflated entries.
+ */
+ public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;
+
+ /**
+ * Compression method for stored entries.
+ */
+ public static final int STORED = java.util.zip.ZipEntry.STORED;
+
+ /**
+ * default encoding for file names and comment.
+ */
+ static final String DEFAULT_ENCODING = ZipEncodingHelper.UTF8;
+
+ /**
+ * General purpose flag, which indicates that filenames are
+ * written in UTF-8.
+ * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
+ */
+ @Deprecated
+ public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;
+
+ private static final byte[] EMPTY = new byte[0];
+
+ /**
+ * Current entry.
+ */
+ private CurrentEntry entry;
+
+ /**
+ * The file comment.
+ */
+ private String comment = "";
+
+ /**
+ * Compression level for next entry.
+ */
+ private int level = DEFAULT_COMPRESSION;
+
+ /**
+ * Has the compression level changed when compared to the last
+ * entry?
+ */
+ private boolean hasCompressionLevelChanged = false;
+
+ /**
+ * Default compression method for next entry.
+ */
+ private int method = java.util.zip.ZipEntry.DEFLATED;
+
+ /**
+ * List of ZipArchiveEntries written so far.
+ */
+ private final List<ZipArchiveEntry> entries =
+ new LinkedList<ZipArchiveEntry>();
+
+ /**
+ * CRC instance to avoid parsing DEFLATED data twice.
+ */
+ private final CRC32 crc = new CRC32();
+
+ /**
+ * Count the bytes written to out.
+ */
+ private long written = 0;
+
+ /**
+ * Start of central directory.
+ */
+ private long cdOffset = 0;
+
+ /**
+ * Length of central directory.
+ */
+ private long cdLength = 0;
+
+ /**
+ * Helper, a 0 as ZipShort.
+ */
+ private static final byte[] ZERO = {0, 0};
+
+ /**
+ * Helper, a 0 as ZipLong.
+ */
+ private static final byte[] LZERO = {0, 0, 0, 0};
+
+ /**
+ * Holds the offsets of the LFH starts for each entry.
+ */
+ private final Map<ZipArchiveEntry, Long> offsets =
+ new HashMap<ZipArchiveEntry, Long>();
+
+ /**
+ * The encoding to use for filenames and the file comment.
+ *
+ * For a list of possible values see http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html.
+ * Defaults to UTF-8.
+ */
+ private String encoding = DEFAULT_ENCODING;
+
+ /**
+ * The zip encoding to use for filenames and the file comment.
+ *
+ * This field is of internal use and will be set in {@link
+ * #setEncoding(String)}.
+ */
+ private ZipEncoding zipEncoding =
+ ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);
+
+ /**
+ * This Deflater object is used for output.
+ *
+ */
+ protected final Deflater def = new Deflater(level, true);
+
+ /**
+ * This buffer serves as the output buffer for the Deflater.
+ */
+ private final byte[] buf = new byte[BUFFER_SIZE];
+
+ /**
+ * Optional random access output.
+ */
+ private final RandomAccessFile raf;
+
+ private final OutputStream out;
+
+ /**
+ * whether to use the general purpose bit flag when writing UTF-8
+ * filenames or not.
+ */
+ private boolean useUTF8Flag = true;
+
+ /**
+ * Whether to encode non-encodable file names as UTF-8.
+ */
+ private boolean fallbackToUTF8 = false;
+
+ /**
+ * whether to create UnicodePathExtraField-s for each entry.
+ */
+ private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;
+
+ /**
+ * Whether anything inside this archive has used a ZIP64 feature.
+ *
+ * @since 1.3
+ */
+ private boolean hasUsedZip64 = false;
+
+ private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;
+
+ /**
+ * Creates a new ZIP OutputStream filtering the underlying stream.
+ * @param out the outputstream to zip
+ */
+ public ZipArchiveOutputStream(OutputStream out) {
+ this.out = out;
+ this.raf = null;
+ }
+
+ /**
+ * Creates a new ZIP OutputStream writing to a File. Will use
+ * random access if possible.
+ * @param file the file to zip to
+ * @throws IOException on error
+ */
+ public ZipArchiveOutputStream(File file) throws IOException {
+ OutputStream o = null;
+ RandomAccessFile _raf = null;
+ try {
+ _raf = new RandomAccessFile(file, "rw");
+ _raf.setLength(0);
+ } catch (IOException e) {
+ IOUtils.closeQuietly(_raf);
+ _raf = null;
+ o = new FileOutputStream(file);
+ }
+ out = o;
+ raf = _raf;
+ }
+
+ /**
+ * This method indicates whether this archive is writing to a
+ * seekable stream (i.e., to a random access file).
+ *
+ * For seekable streams, you don't need to calculate the CRC or
+ * uncompressed size for {@link #STORED} entries before
+ * invoking {@link #putArchiveEntry(ArchiveEntry)}.
+ * @return true if seekable
+ */
+ public boolean isSeekable() {
+ return raf != null;
+ }
+
+ /**
+ * The encoding to use for filenames and the file comment.
+ *
+ *
+ * For a list of possible values see http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html.
+ * Defaults to UTF-8.
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ */
+ public void setEncoding(final String encoding) {
+ this.encoding = encoding;
+ this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
+ if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
+ useUTF8Flag = false;
+ }
+ }
+
+ /**
+ * The encoding to use for filenames and the file comment.
+ *
+ * @return null if using the platform's default character encoding.
+ */
+ public String getEncoding() {
+ return encoding;
+ }
+
+ /**
+ * Whether to set the language encoding flag if the file name
+ * encoding is UTF-8.
+ *
+ * Defaults to true.
+ */
+ public void setUseLanguageEncodingFlag(boolean b) {
+ useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding);
+ }
+
+ /**
+ * Whether to create Unicode Extra Fields.
+ *
+ * Defaults to NEVER.
+ */
+ public void setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy b) {
+ createUnicodeExtraFields = b;
+ }
+
+ /**
+ * Whether to fall back to UTF-8 and the language encoding flag if
+ * the file name cannot be encoded using the specified encoding.
+ *
+ * Defaults to false.
+ */
+ public void setFallbackToUTF8(boolean b) {
+ fallbackToUTF8 = b;
+ }
+
+ /**
+ * Whether Zip64 extensions will be used.
+ *
+ * When setting the mode to {@link Zip64Mode#Never Never},
+ * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link
+ * #finish} or {@link #close} may throw a {@link
+ * Zip64RequiredException} if the entry's size or the total size
+ * of the archive exceeds 4GB or there are more than 65536 entries
+ * inside the archive. Any archive created in this mode will be
+ * readable by implementations that don't support Zip64.
+ *
+ * When setting the mode to {@link Zip64Mode#Always Always},
+ * Zip64 extensions will be used for all entries. Any archive
+ * created in this mode may be unreadable by implementations that
+ * don't support Zip64 even if all its contents would be.
+ *
+ * When setting the mode to {@link Zip64Mode#AsNeeded
+ * AsNeeded}, Zip64 extensions will transparently be used for
+ * those entries that require them. This mode can only be used if
+ * the uncompressed size of the {@link ZipArchiveEntry} is known
+ * when calling {@link #putArchiveEntry} or the archive is written
+ * to a seekable output (i.e. you have used the {@link
+ * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) -
+ * this mode is not valid when the output stream is not seekable
+ * and the uncompressed size is unknown when {@link
+ * #putArchiveEntry} is called.
+ *
+ * If no entry inside the resulting archive requires Zip64
+ * extensions then {@link Zip64Mode#Never Never} will create the
+ * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will
+ * create a slightly bigger archive if the uncompressed size of
+ * any entry has initially been unknown and create an archive
+ * identical to {@link Zip64Mode#Never Never} otherwise. {@link
+ * Zip64Mode#Always Always} will create an archive that is at
+ * least 24 bytes per entry bigger than the one {@link
+ * Zip64Mode#Never Never} would create.
+ *
+ * Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless
+ * {@link #putArchiveEntry} is called with an entry of unknown
+ * size and data is written to a non-seekable stream - in this
+ * case the default is {@link Zip64Mode#Never Never}.
+ *
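+ * E.g. calling {@code setUseZip64(Zip64Mode.Never)} before the
+ * first {@link #putArchiveEntry} guarantees an archive readable by
+ * tools without Zip64 support, at the price of a possible
+ * Zip64RequiredException.
+ *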
+ * @since 1.3
+ */
+ public void setUseZip64(Zip64Mode mode) {
+ zip64Mode = mode;
+ }
+
+ /**
+ * {@inheritDoc}
+ * @throws Zip64RequiredException if the archive's size exceeds 4
+ * GByte or there are more than 65535 entries inside the archive
+ * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
+ */
+ @Override
+ public void finish() throws IOException {
+ if (finished) {
+ throw new IOException("This archive has already been finished");
+ }
+
+ if (entry != null) {
+ throw new IOException("This archive contains unclosed entries.");
+ }
+
+ cdOffset = written;
+ for (ZipArchiveEntry ze : entries) {
+ writeCentralFileHeader(ze);
+ }
+ cdLength = written - cdOffset;
+ writeZip64CentralDirectory();
+ writeCentralDirectoryEnd();
+ offsets.clear();
+ entries.clear();
+ def.end();
+ finished = true;
+ }
+
+ /**
+ * Writes all necessary data for this entry.
+ * @throws IOException on error
+ * @throws Zip64RequiredException if the entry's uncompressed or
+ * compressed size exceeds 4 GByte and {@link #setUseZip64}
+ * is {@link Zip64Mode#Never}.
+ */
+ @Override
+ public void closeArchiveEntry() throws IOException {
+ if (finished) {
+ throw new IOException("Stream has already been finished");
+ }
+
+ if (entry == null) {
+ throw new IOException("No current entry to close");
+ }
+
+ if (!entry.hasWritten) {
+ write(EMPTY, 0, 0);
+ }
+
+ flushDeflater();
+
+ final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
+ long bytesWritten = written - entry.dataStart;
+ long realCrc = crc.getValue();
+ crc.reset();
+
+ final boolean actuallyNeedsZip64 =
+ handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
+
+ if (raf != null) {
+ rewriteSizesAndCrc(actuallyNeedsZip64);
+ }
+
+ writeDataDescriptor(entry.entry);
+ entry = null;
+ }
+
+ /**
+ * Ensures all bytes sent to the deflater are written to the stream.
+ */
+ private void flushDeflater() throws IOException {
+ if (entry.entry.getMethod() == DEFLATED) {
+ def.finish();
+ while (!def.finished()) {
+ deflate();
+ }
+ }
+ }
+
+ /**
+ * Ensures the current entry's size and CRC information is set to
+ * the values just written, verifies it isn't too big in the
+ * Zip64Mode.Never case and returns whether the entry would
+ * require a Zip64 extra field.
+ */
+ private boolean handleSizesAndCrc(long bytesWritten, long crc,
+ Zip64Mode effectiveMode)
+ throws ZipException {
+ if (entry.entry.getMethod() == DEFLATED) {
+ /* It turns out def.getBytesRead() returns wrong values if
+ * the size exceeds 4 GB on Java < Java7
+ entry.entry.setSize(def.getBytesRead());
+ */
+ entry.entry.setSize(entry.bytesRead);
+ entry.entry.setCompressedSize(bytesWritten);
+ entry.entry.setCrc(crc);
+
+ def.reset();
+ } else if (raf == null) {
+ if (entry.entry.getCrc() != crc) {
+ throw new ZipException("bad CRC checksum for entry "
+ + entry.entry.getName() + ": "
+ + Long.toHexString(entry.entry.getCrc())
+ + " instead of "
+ + Long.toHexString(crc));
+ }
+
+ if (entry.entry.getSize() != bytesWritten) {
+ throw new ZipException("bad size for entry "
+ + entry.entry.getName() + ": "
+ + entry.entry.getSize()
+ + " instead of "
+ + bytesWritten);
+ }
+ } else { /* method is STORED and we used RandomAccessFile */
+ entry.entry.setSize(bytesWritten);
+ entry.entry.setCompressedSize(bytesWritten);
+ entry.entry.setCrc(crc);
+ }
+
+ final boolean actuallyNeedsZip64 = effectiveMode == Zip64Mode.Always
+ || entry.entry.getSize() >= ZIP64_MAGIC
+ || entry.entry.getCompressedSize() >= ZIP64_MAGIC;
+ if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
+ throw new Zip64RequiredException(Zip64RequiredException
+ .getEntryTooBigMessage(entry.entry));
+ }
+ return actuallyNeedsZip64;
+ }
+
+ /**
+ * When using random access output, write the local file header
+ * and potentially the ZIP64 extra containing the correct CRC and
+ * compressed/uncompressed sizes.
+ */
+ private void rewriteSizesAndCrc(boolean actuallyNeedsZip64)
+ throws IOException {
+ long save = raf.getFilePointer();
+
+ raf.seek(entry.localDataStart);
+ writeOut(ZipLong.getBytes(entry.entry.getCrc()));
+ if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
+ writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
+ writeOut(ZipLong.getBytes(entry.entry.getSize()));
+ } else {
+ writeOut(ZipLong.ZIP64_MAGIC.getBytes());
+ writeOut(ZipLong.ZIP64_MAGIC.getBytes());
+ }
+
+ if (hasZip64Extra(entry.entry)) {
+ // seek to ZIP64 extra, skip header and size information
+ raf.seek(entry.localDataStart + 3 * WORD + 2 * SHORT
+ + getName(entry.entry).limit() + 2 * SHORT);
+ // inside the ZIP64 extra uncompressed size comes
+ // first, unlike the LFH, CD or data descriptor
+ writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize()));
+ writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));
+
+ if (!actuallyNeedsZip64) {
+ // do some cleanup:
+ // * rewrite version needed to extract
+ raf.seek(entry.localDataStart - 5 * SHORT);
+ writeOut(ZipShort.getBytes(INITIAL_VERSION));
+
+ // * remove ZIP64 extra so it doesn't get written
+ // to the central directory
+ entry.entry.removeExtraField(Zip64ExtendedInformationExtraField
+ .HEADER_ID);
+ entry.entry.setExtra();
+
+ // * reset hasUsedZip64 if it has been set because
+ // of this entry
+ if (entry.causedUseOfZip64) {
+ hasUsedZip64 = false;
+ }
+ }
+ }
+ raf.seek(save);
+ }
+
+ /**
+ * {@inheritDoc}
+ * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
+ * @throws Zip64RequiredException if the entry's uncompressed or
+ * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
+ * is {@link Zip64Mode#Never}.
+ */
+ @Override
+ public void putArchiveEntry(ArchiveEntry archiveEntry) throws IOException {
+ if (finished) {
+ throw new IOException("Stream has already been finished");
+ }
+
+ if (entry != null) {
+ closeArchiveEntry();
+ }
+
+ entry = new CurrentEntry((ZipArchiveEntry) archiveEntry);
+ entries.add(entry.entry);
+
+ setDefaults(entry.entry);
+
+ final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
+ validateSizeInformation(effectiveMode);
+
+ if (shouldAddZip64Extra(entry.entry, effectiveMode)) {
+
+ Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);
+
+ // just a placeholder, real data will be in data
+ // descriptor or inserted later via RandomAccessFile
+ ZipEightByteInteger size = ZipEightByteInteger.ZERO;
+ if (entry.entry.getMethod() == STORED
+ && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
+ // actually, we already know the sizes
+ size = new ZipEightByteInteger(entry.entry.getSize());
+ }
+ z64.setSize(size);
+ z64.setCompressedSize(size);
+ entry.entry.setExtra();
+ }
+
+ if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
+ def.setLevel(level);
+ hasCompressionLevelChanged = false;
+ }
+ writeLocalFileHeader(entry.entry);
+ }
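+
+ // Entry lifecycle sketch (names are illustrative): each call here
+ // starts a new entry and implicitly closes an unclosed previous one;
+ // data is streamed through write() and sealed by closeArchiveEntry().
+ //
+ //   ZipArchiveEntry e = new ZipArchiveEntry("data.bin");
+ //   zos.putArchiveEntry(e);
+ //   zos.write(payload, 0, payload.length);
+ //   zos.closeArchiveEntry();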
+
+ /**
+ * Provides default values for compression method and last
+ * modification time.
+ */
+ private void setDefaults(ZipArchiveEntry entry) {
+ if (entry.getMethod() == -1) { // not specified
+ entry.setMethod(method);
+ }
+
+ if (entry.getTime() == -1) { // not specified
+ entry.setTime(System.currentTimeMillis());
+ }
+ }
+
+ /**
+ * Throws an exception if the size is unknown for a stored entry
+ * that is written to a non-seekable output or the entry is too
+ * big to be written without Zip64 extra but the mode has been set
+ * to Never.
+ */
+ private void validateSizeInformation(Zip64Mode effectiveMode)
+ throws ZipException {
+ // Size/CRC not required if RandomAccessFile is used
+ if (entry.entry.getMethod() == STORED && raf == null) {
+ if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
+ throw new ZipException("uncompressed size is required for"
+ + " STORED method when not writing to a"
+ + " file");
+ }
+ if (entry.entry.getCrc() == -1) {
+ throw new ZipException("crc checksum is required for STORED"
+ + " method when not writing to a file");
+ }
+ entry.entry.setCompressedSize(entry.entry.getSize());
+ }
+
+ if ((entry.entry.getSize() >= ZIP64_MAGIC
+ || entry.entry.getCompressedSize() >= ZIP64_MAGIC)
+ && effectiveMode == Zip64Mode.Never) {
+ throw new Zip64RequiredException(Zip64RequiredException
+ .getEntryTooBigMessage(entry.entry));
+ }
+ }
+
+ /**
+ * Whether to add a Zip64 extended information extra field to the
+ * local file header.
+ *
+ * <p>Returns true if</p>
+ *
+ * <ul>
+ * <li>mode is Always</li>
+ * <li>or we already know it is going to be needed</li>
+ * <li>or the size is unknown and we can ensure it won't hurt
+ * other implementations if we add it (i.e. we can erase its
+ * usage)</li>
+ * </ul>
+ */
+ private boolean shouldAddZip64Extra(ZipArchiveEntry entry, Zip64Mode mode) {
+ return mode == Zip64Mode.Always
+ || entry.getSize() >= ZIP64_MAGIC
+ || entry.getCompressedSize() >= ZIP64_MAGIC
+ || (entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
+ && raf != null && mode != Zip64Mode.Never);
+ }
+
+ /**
+ * Set the file comment.
+ * @param comment the comment
+ */
+ public void setComment(String comment) {
+ this.comment = comment;
+ }
+
+ /**
+ * Sets the compression level for subsequent entries.
+ *
+ * Default is Deflater.DEFAULT_COMPRESSION.
+ * @param level the compression level.
+ * @throws IllegalArgumentException if an invalid compression
+ * level is specified.
+ */
+ public void setLevel(int level) {
+ if (level < Deflater.DEFAULT_COMPRESSION
+ || level > Deflater.BEST_COMPRESSION) {
+ throw new IllegalArgumentException("Invalid compression level: "
+ + level);
+ }
+ hasCompressionLevelChanged = (this.level != level);
+ this.level = level;
+ }
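+
+ // Sketch: levels mirror java.util.zip.Deflater, e.g.
+ //   zos.setLevel(Deflater.BEST_COMPRESSION);    // 9
+ //   zos.setLevel(Deflater.DEFAULT_COMPRESSION); // -1
+ // where "zos" is assumed to be an instance of this stream.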
+
+ /**
+ * Sets the default compression method for subsequent entries.
+ *
+ * Default is DEFLATED.
+ * @param method an int from java.util.zip.ZipEntry
+ */
+ public void setMethod(int method) {
+ this.method = method;
+ }
+
+ /**
+ * Whether this stream is able to write the given entry.
+ *
+ * May return false if it is set up to use encryption or a
+ * compression method that hasn't been implemented yet.
+ * @since 1.1
+ */
+ @Override
+ public boolean canWriteEntryData(ArchiveEntry ae) {
+ if (ae instanceof ZipArchiveEntry) {
+ ZipArchiveEntry zae = (ZipArchiveEntry) ae;
+ return zae.getMethod() != ZipMethod.IMPLODING.getCode()
+ && zae.getMethod() != ZipMethod.UNSHRINKING.getCode()
+ && ZipUtil.canHandleEntryData(zae);
+ }
+ return false;
+ }
+
+ /**
+ * Writes bytes to ZIP entry.
+ * @param b the byte array to write
+ * @param offset the start position to write from
+ * @param length the number of bytes to write
+ * @throws IOException on error
+ */
+ @Override
+ public void write(byte[] b, int offset, int length) throws IOException {
+ ZipUtil.checkRequestedFeatures(entry.entry);
+ entry.hasWritten = true;
+ if (entry.entry.getMethod() == DEFLATED) {
+ writeDeflated(b, offset, length);
+ } else {
+ writeOut(b, offset, length);
+ written += length;
+ }
+ crc.update(b, offset, length);
+ count(length);
+ }
+
+ /**
+ * write implementation for DEFLATED entries.
+ */
+ private void writeDeflated(byte[] b, int offset, int length)
+ throws IOException {
+ if (length > 0 && !def.finished()) {
+ entry.bytesRead += length;
+ if (length <= DEFLATER_BLOCK_SIZE) {
+ def.setInput(b, offset, length);
+ deflateUntilInputIsNeeded();
+ } else {
+ final int fullblocks = length / DEFLATER_BLOCK_SIZE;
+ for (int i = 0; i < fullblocks; i++) {
+ def.setInput(b, offset + i * DEFLATER_BLOCK_SIZE,
+ DEFLATER_BLOCK_SIZE);
+ deflateUntilInputIsNeeded();
+ }
+ final int done = fullblocks * DEFLATER_BLOCK_SIZE;
+ if (done < length) {
+ def.setInput(b, offset + done, length - done);
+ deflateUntilInputIsNeeded();
+ }
+ }
+ }
+ }
+
+ /**
+ * Closes this output stream and releases any system resources
+ * associated with the stream.
+ *
+ * @exception IOException if an I/O error occurs.
+ * @throws Zip64RequiredException if the archive's size exceeds 4
+ * GByte or there are more than 65535 entries inside the archive
+ * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
+ */
+ @Override
+ public void close() throws IOException {
+ if (!finished) {
+ finish();
+ }
+ destroy();
+ }
+
+ /**
+ * Flushes this output stream and forces any buffered output bytes
+ * to be written out to the stream.
+ *
+ * @exception IOException if an I/O error occurs.
+ */
+ @Override
+ public void flush() throws IOException {
+ if (out != null) {
+ out.flush();
+ }
+ }
+
+ /*
+ * Various ZIP constants
+ */
+ /**
+ * local file header signature
+ */
+ static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes();
+ /**
+ * data descriptor signature
+ */
+ static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes();
+ /**
+ * central file header signature
+ */
+ static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes();
+ /**
+ * end of central dir signature
+ */
+ static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L);
+ /**
+ * ZIP64 end of central dir signature
+ */
+ static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L);
+ /**
+ * ZIP64 end of central dir locator signature
+ */
+ static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L);
+
+ /**
+ * Writes next block of compressed data to the output stream.
+ * @throws IOException on error
+ */
+ protected final void deflate() throws IOException {
+ int len = def.deflate(buf, 0, buf.length);
+ if (len > 0) {
+ writeOut(buf, 0, len);
+ written += len;
+ }
+ }
+
+ /**
+ * Writes the local file header entry
+ * @param ze the entry to write
+ * @throws IOException on error
+ */
+ protected void writeLocalFileHeader(ZipArchiveEntry ze) throws IOException {
+
+ boolean encodable = zipEncoding.canEncode(ze.getName());
+ ByteBuffer name = getName(ze);
+
+ if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) {
+ addUnicodeExtraFields(ze, encodable, name);
+ }
+
+ offsets.put(ze, Long.valueOf(written));
+
+ writeOut(LFH_SIG);
+ written += WORD;
+
+ //store method in local variable to prevent multiple method calls
+ final int zipMethod = ze.getMethod();
+
+ writeVersionNeededToExtractAndGeneralPurposeBits(zipMethod,
+ !encodable
+ && fallbackToUTF8,
+ hasZip64Extra(ze));
+ written += WORD;
+
+ // compression method
+ writeOut(ZipShort.getBytes(zipMethod));
+ written += SHORT;
+
+ // last mod. time and date
+ writeOut(ZipUtil.toDosTime(ze.getTime()));
+ written += WORD;
+
+ // CRC
+ // compressed length
+ // uncompressed length
+ entry.localDataStart = written;
+ if (zipMethod == DEFLATED || raf != null) {
+ writeOut(LZERO);
+ if (hasZip64Extra(entry.entry)) {
+ // point to ZIP64 extended information extra field for
+ // sizes, may get rewritten once sizes are known if
+ // stream is seekable
+ writeOut(ZipLong.ZIP64_MAGIC.getBytes());
+ writeOut(ZipLong.ZIP64_MAGIC.getBytes());
+ } else {
+ writeOut(LZERO);
+ writeOut(LZERO);
+ }
+ } else {
+ writeOut(ZipLong.getBytes(ze.getCrc()));
+ byte[] size = ZipLong.ZIP64_MAGIC.getBytes();
+ if (!hasZip64Extra(ze)) {
+ size = ZipLong.getBytes(ze.getSize());
+ }
+ writeOut(size);
+ writeOut(size);
+ }
+ // CheckStyle:MagicNumber OFF
+ written += 12;
+ // CheckStyle:MagicNumber ON
+
+ // file name length
+ writeOut(ZipShort.getBytes(name.limit()));
+ written += SHORT;
+
+ // extra field length
+ byte[] extra = ze.getLocalFileDataExtra();
+ writeOut(ZipShort.getBytes(extra.length));
+ written += SHORT;
+
+ // file name
+ writeOut(name.array(), name.arrayOffset(),
+ name.limit() - name.position());
+ written += name.limit();
+
+ // extra field
+ writeOut(extra);
+ written += extra.length;
+
+ entry.dataStart = written;
+ }
+
+ /**
+ * Adds UnicodeExtra fields for name and file comment if mode is
+ * ALWAYS or the data cannot be encoded using the configured
+ * encoding.
+ */
+ private void addUnicodeExtraFields(ZipArchiveEntry ze, boolean encodable,
+ ByteBuffer name)
+ throws IOException {
+ if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
+ || !encodable) {
+ ze.addExtraField(new UnicodePathExtraField(ze.getName(),
+ name.array(),
+ name.arrayOffset(),
+ name.limit()
+ - name.position()));
+ }
+
+ String comm = ze.getComment();
+ if (comm != null && !"".equals(comm)) {
+
+ boolean commentEncodable = zipEncoding.canEncode(comm);
+
+ if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
+ || !commentEncodable) {
+ ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
+ ze.addExtraField(new UnicodeCommentExtraField(comm,
+ commentB.array(),
+ commentB.arrayOffset(),
+ commentB.limit()
+ - commentB.position())
+ );
+ }
+ }
+ }
+
+ /**
+ * Writes the data descriptor entry.
+ * @param ze the entry to write
+ * @throws IOException on error
+ */
+ protected void writeDataDescriptor(ZipArchiveEntry ze) throws IOException {
+ if (ze.getMethod() != DEFLATED || raf != null) {
+ return;
+ }
+ writeOut(DD_SIG);
+ writeOut(ZipLong.getBytes(ze.getCrc()));
+ int sizeFieldSize = WORD;
+ if (!hasZip64Extra(ze)) {
+ writeOut(ZipLong.getBytes(ze.getCompressedSize()));
+ writeOut(ZipLong.getBytes(ze.getSize()));
+ } else {
+ sizeFieldSize = DWORD;
+ writeOut(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
+ writeOut(ZipEightByteInteger.getBytes(ze.getSize()));
+ }
+ written += 2 * WORD + 2 * sizeFieldSize;
+ }
+
+ /**
+ * Writes the central file header entry.
+ * @param ze the entry to write
+ * @throws IOException on error
+ * @throws Zip64RequiredException if the archive's size exceeds 4
+ * GByte and {@link #setUseZip64} is {@link
+ * Zip64Mode#Never}.
+ */
+ protected void writeCentralFileHeader(ZipArchiveEntry ze) throws IOException {
+ writeOut(CFH_SIG);
+ written += WORD;
+
+ final long lfhOffset = offsets.get(ze).longValue();
+ final boolean needsZip64Extra = hasZip64Extra(ze)
+ || ze.getCompressedSize() >= ZIP64_MAGIC
+ || ze.getSize() >= ZIP64_MAGIC
+ || lfhOffset >= ZIP64_MAGIC;
+
+ if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
+ // must be the offset that is too big, otherwise an
+ // exception would have been thrown in putArchiveEntry or
+ // closeArchiveEntry
+ throw new Zip64RequiredException(Zip64RequiredException
+ .ARCHIVE_TOO_BIG_MESSAGE);
+ }
+
+ handleZip64Extra(ze, lfhOffset, needsZip64Extra);
+
+ // version made by
+ // CheckStyle:MagicNumber OFF
+ writeOut(ZipShort.getBytes((ze.getPlatform() << 8) |
+ (!hasUsedZip64 ? DATA_DESCRIPTOR_MIN_VERSION
+ : ZIP64_MIN_VERSION)));
+ written += SHORT;
+
+ final int zipMethod = ze.getMethod();
+ final boolean encodable = zipEncoding.canEncode(ze.getName());
+ writeVersionNeededToExtractAndGeneralPurposeBits(zipMethod,
+ !encodable
+ && fallbackToUTF8,
+ needsZip64Extra);
+ written += WORD;
+
+ // compression method
+ writeOut(ZipShort.getBytes(zipMethod));
+ written += SHORT;
+
+ // last mod. time and date
+ writeOut(ZipUtil.toDosTime(ze.getTime()));
+ written += WORD;
+
+ // CRC
+ // compressed length
+ // uncompressed length
+ writeOut(ZipLong.getBytes(ze.getCrc()));
+ if (ze.getCompressedSize() >= ZIP64_MAGIC
+ || ze.getSize() >= ZIP64_MAGIC) {
+ writeOut(ZipLong.ZIP64_MAGIC.getBytes());
+ writeOut(ZipLong.ZIP64_MAGIC.getBytes());
+ } else {
+ writeOut(ZipLong.getBytes(ze.getCompressedSize()));
+ writeOut(ZipLong.getBytes(ze.getSize()));
+ }
+ // CheckStyle:MagicNumber OFF
+ written += 12;
+ // CheckStyle:MagicNumber ON
+
+ ByteBuffer name = getName(ze);
+
+ writeOut(ZipShort.getBytes(name.limit()));
+ written += SHORT;
+
+ // extra field length
+ byte[] extra = ze.getCentralDirectoryExtra();
+ writeOut(ZipShort.getBytes(extra.length));
+ written += SHORT;
+
+ // file comment length
+ String comm = ze.getComment();
+ if (comm == null) {
+ comm = "";
+ }
+
+ ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
+
+ writeOut(ZipShort.getBytes(commentB.limit()));
+ written += SHORT;
+
+ // disk number start
+ writeOut(ZERO);
+ written += SHORT;
+
+ // internal file attributes
+ writeOut(ZipShort.getBytes(ze.getInternalAttributes()));
+ written += SHORT;
+
+ // external file attributes
+ writeOut(ZipLong.getBytes(ze.getExternalAttributes()));
+ written += WORD;
+
+ // relative offset of LFH
+ writeOut(ZipLong.getBytes(Math.min(lfhOffset, ZIP64_MAGIC)));
+ written += WORD;
+
+ // file name
+ writeOut(name.array(), name.arrayOffset(),
+ name.limit() - name.position());
+ written += name.limit();
+
+ // extra field
+ writeOut(extra);
+ written += extra.length;
+
+ // file comment
+ writeOut(commentB.array(), commentB.arrayOffset(),
+ commentB.limit() - commentB.position());
+ written += commentB.limit();
+ }
+
+ /**
+ * If the entry needs Zip64 extra information inside the central
+ * directory then configure its data.
+ */
+ private void handleZip64Extra(ZipArchiveEntry ze, long lfhOffset,
+ boolean needsZip64Extra) {
+ if (needsZip64Extra) {
+ Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
+ if (ze.getCompressedSize() >= ZIP64_MAGIC
+ || ze.getSize() >= ZIP64_MAGIC) {
+ z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
+ z64.setSize(new ZipEightByteInteger(ze.getSize()));
+ } else {
+ // reset value that may have been set for LFH
+ z64.setCompressedSize(null);
+ z64.setSize(null);
+ }
+ if (lfhOffset >= ZIP64_MAGIC) {
+ z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
+ }
+ ze.setExtra();
+ }
+ }
+
+ /**
+ * Writes the "End of central dir record".
+ * @throws IOException on error
+ * @throws Zip64RequiredException if the archive's size exceeds 4
+ * GByte or there are more than 65535 entries inside the archive
+ * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
+ */
+ protected void writeCentralDirectoryEnd() throws IOException {
+ writeOut(EOCD_SIG);
+
+ // disk numbers
+ writeOut(ZERO);
+ writeOut(ZERO);
+
+ // number of entries
+ int numberOfEntries = entries.size();
+ if (numberOfEntries > ZIP64_MAGIC_SHORT
+ && zip64Mode == Zip64Mode.Never) {
+ throw new Zip64RequiredException(Zip64RequiredException
+ .TOO_MANY_ENTRIES_MESSAGE);
+ }
+ if (cdOffset > ZIP64_MAGIC && zip64Mode == Zip64Mode.Never) {
+ throw new Zip64RequiredException(Zip64RequiredException
+ .ARCHIVE_TOO_BIG_MESSAGE);
+ }
+
+ byte[] num = ZipShort.getBytes(Math.min(numberOfEntries,
+ ZIP64_MAGIC_SHORT));
+ writeOut(num);
+ writeOut(num);
+
+ // length and location of CD
+ writeOut(ZipLong.getBytes(Math.min(cdLength, ZIP64_MAGIC)));
+ writeOut(ZipLong.getBytes(Math.min(cdOffset, ZIP64_MAGIC)));
+
+ // ZIP file comment
+ ByteBuffer data = this.zipEncoding.encode(comment);
+ writeOut(ZipShort.getBytes(data.limit()));
+ writeOut(data.array(), data.arrayOffset(),
+ data.limit() - data.position());
+ }
+
+ private static final byte[] ONE = ZipLong.getBytes(1L);
+
+ /**
+ * Writes the "ZIP64 End of central dir record" and
+ * "ZIP64 End of central dir locator".
+ * @throws IOException on error
+ * @since 1.3
+ */
+ protected void writeZip64CentralDirectory() throws IOException {
+ if (zip64Mode == Zip64Mode.Never) {
+ return;
+ }
+
+ if (!hasUsedZip64
+ && (cdOffset >= ZIP64_MAGIC || cdLength >= ZIP64_MAGIC
+ || entries.size() >= ZIP64_MAGIC_SHORT)) {
+ // actually "will use"
+ hasUsedZip64 = true;
+ }
+
+ if (!hasUsedZip64) {
+ return;
+ }
+
+ long offset = written;
+
+ writeOut(ZIP64_EOCD_SIG);
+ // size, we don't have any variable length as we don't support
+ // the extensible data sector, yet
+ writeOut(ZipEightByteInteger
+ .getBytes(SHORT /* version made by */
+ + SHORT /* version needed to extract */
+ + WORD /* disk number */
+ + WORD /* disk with central directory */
+ + DWORD /* number of entries in CD on this disk */
+ + DWORD /* total number of entries */
+ + DWORD /* size of CD */
+ + DWORD /* offset of CD */
+ ));
+
+ // version made by and version needed to extract
+ writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
+ writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
+
+ // disk numbers - four bytes this time
+ writeOut(LZERO);
+ writeOut(LZERO);
+
+ // number of entries
+ byte[] num = ZipEightByteInteger.getBytes(entries.size());
+ writeOut(num);
+ writeOut(num);
+
+ // length and location of CD
+ writeOut(ZipEightByteInteger.getBytes(cdLength));
+ writeOut(ZipEightByteInteger.getBytes(cdOffset));
+
+ // no "zip64 extensible data sector" for now
+
+ // and now the "ZIP64 end of central directory locator"
+ writeOut(ZIP64_EOCD_LOC_SIG);
+
+ // disk number holding the ZIP64 EOCD record
+ writeOut(LZERO);
+ // relative offset of ZIP64 EOCD record
+ writeOut(ZipEightByteInteger.getBytes(offset));
+ // total number of disks
+ writeOut(ONE);
+ }
+
+ /**
+ * Write bytes to output or random access file.
+ * @param data the byte array to write
+ * @throws IOException on error
+ */
+ protected final void writeOut(byte[] data) throws IOException {
+ writeOut(data, 0, data.length);
+ }
+
+ /**
+ * Write bytes to output or random access file.
+ * @param data the byte array to write
+ * @param offset the start position to write from
+ * @param length the number of bytes to write
+ * @throws IOException on error
+ */
+ protected final void writeOut(byte[] data, int offset, int length)
+ throws IOException {
+ if (raf != null) {
+ raf.write(data, offset, length);
+ } else {
+ out.write(data, offset, length);
+ }
+ }
+
+ private void deflateUntilInputIsNeeded() throws IOException {
+ while (!def.needsInput()) {
+ deflate();
+ }
+ }
+
+ private void writeVersionNeededToExtractAndGeneralPurposeBits(final int
+ zipMethod,
+ final boolean
+ utfFallback,
+ final boolean
+ zip64)
+ throws IOException {
+
+ // CheckStyle:MagicNumber OFF
+ int versionNeededToExtract = INITIAL_VERSION;
+ GeneralPurposeBit b = new GeneralPurposeBit();
+ b.useUTF8ForNames(useUTF8Flag || utfFallback);
+ if (zipMethod == DEFLATED && raf == null) {
+ // requires version 2 as we are going to store length info
+ // in the data descriptor
+ versionNeededToExtract = DATA_DESCRIPTOR_MIN_VERSION;
+ b.useDataDescriptor(true);
+ }
+ if (zip64) {
+ versionNeededToExtract = ZIP64_MIN_VERSION;
+ }
+ // CheckStyle:MagicNumber ON
+
+ // version needed to extract
+ writeOut(ZipShort.getBytes(versionNeededToExtract));
+ // general purpose bit flag
+ writeOut(b.encode());
+ }
+
+ /**
+ * Creates a new zip entry taking some information from the given
+ * file and using the provided name.
+ *
+ * The name will be adjusted to end with a forward slash "/" if
+ * the file is a directory. If the file is not a directory a
+ * potential trailing forward slash will be stripped from the
+ * entry name.
+ *
+ * Must not be used if the stream has already been closed.
+ */
+ @Override
+ public ArchiveEntry createArchiveEntry(File inputFile, String entryName)
+ throws IOException {
+ if (finished) {
+ throw new IOException("Stream has already been finished");
+ }
+ return new ZipArchiveEntry(inputFile, entryName);
+ }
+
+ /**
+ * Get the existing ZIP64 extended information extra field or
+ * create a new one and add it to the entry.
+ *
+ * @since 1.3
+ */
+ private Zip64ExtendedInformationExtraField
+ getZip64Extra(ZipArchiveEntry ze) {
+ if (entry != null) {
+ entry.causedUseOfZip64 = !hasUsedZip64;
+ }
+ hasUsedZip64 = true;
+ Zip64ExtendedInformationExtraField z64 =
+ (Zip64ExtendedInformationExtraField)
+ ze.getExtraField(Zip64ExtendedInformationExtraField
+ .HEADER_ID);
+ if (z64 == null) {
+ /*
+ System.err.println("Adding z64 for " + ze.getName()
+ + ", method: " + ze.getMethod()
+ + " (" + (ze.getMethod() == STORED) + ")"
+ + ", raf: " + (raf != null));
+ */
+ z64 = new Zip64ExtendedInformationExtraField();
+ }
+
+ // even if the field is there already, make sure it is the first one
+ ze.addAsFirstExtraField(z64);
+
+ return z64;
+ }
+
+ /**
+ * Is there a ZIP64 extended information extra field for the
+ * entry?
+ *
+ * @since 1.3
+ */
+ private boolean hasZip64Extra(ZipArchiveEntry ze) {
+ return ze.getExtraField(Zip64ExtendedInformationExtraField
+ .HEADER_ID)
+ != null;
+ }
+
+ /**
+ * If the mode is AsNeeded and the entry is a compressed entry of
+ * unknown size that gets written to a non-seekable stream, then
+ * the default changes to Never.
+ *
+ * @since 1.3
+ */
+ private Zip64Mode getEffectiveZip64Mode(ZipArchiveEntry ze) {
+ if (zip64Mode != Zip64Mode.AsNeeded
+ || raf != null
+ || ze.getMethod() != DEFLATED
+ || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
+ return zip64Mode;
+ }
+ return Zip64Mode.Never;
+ }
+
+ private ZipEncoding getEntryEncoding(ZipArchiveEntry ze) {
+ boolean encodable = zipEncoding.canEncode(ze.getName());
+ return !encodable && fallbackToUTF8
+ ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
+ }
+
+ private ByteBuffer getName(ZipArchiveEntry ze) throws IOException {
+ return getEntryEncoding(ze).encode(ze.getName());
+ }
+
+ /**
+ * Closes the underlying stream/file without finishing the
+ * archive, the result will likely be a corrupt archive.
+ *
+ * This method only exists to support tests that generate
+ * corrupt archives so they can clean up any temporary files.
+ */
+ void destroy() throws IOException {
+ if (raf != null) {
+ raf.close();
+ }
+ if (out != null) {
+ out.close();
+ }
+ }
+
+ /**
+ * enum that represents the possible policies for creating Unicode
+ * extra fields.
+ */
+ public static final class UnicodeExtraFieldPolicy {
+ /**
+ * Always create Unicode extra fields.
+ */
+ public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");
+ /**
+ * Never create Unicode extra fields.
+ */
+ public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");
+ /**
+ * Create Unicode extra fields for filenames that cannot be
+ * encoded using the specified encoding.
+ */
+ public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE =
+ new UnicodeExtraFieldPolicy("not encodeable");
+
+ private final String name;
+ private UnicodeExtraFieldPolicy(String n) {
+ name = n;
+ }
+ @Override
+ public String toString() {
+ return name;
+ }
+ }
+
+ /**
+ * Structure collecting information for the entry that is
+ * currently being written.
+ */
+ private static final class CurrentEntry {
+ private CurrentEntry(ZipArchiveEntry entry) {
+ this.entry = entry;
+ }
+ /**
+ * Current ZIP entry.
+ */
+ private final ZipArchiveEntry entry;
+ /**
+ * Offset inside the archive where the CRC field of the current
+ * entry's local file header starts.
+ */
+ private long localDataStart = 0;
+ /**
+ * Offset inside the archive where the current entry's data starts.
+ */
+ private long dataStart = 0;
+ /**
+ * Number of bytes read for the current entry (can't rely on
+ * Deflater#getBytesRead) when using DEFLATED.
+ */
+ private long bytesRead = 0;
+ /**
+ * Whether current entry was the first one using ZIP64 features.
+ */
+ private boolean causedUseOfZip64 = false;
+ /**
+ * Whether write() has been called at all.
+ *
+ * In order to create a valid archive {@link
+ * #closeArchiveEntry closeArchiveEntry} will write an empty
+ * array to get the CRC right if nothing has been written to
+ * the stream at all.
+ */
+ private boolean hasWritten;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipConstants.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipConstants.java
new file mode 100644
index 000000000..b6e2d4536
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipConstants.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+/**
+ * Various constants used throughout the package.
+ *
+ * @since 1.3
+ */
+final class ZipConstants {
+ private ZipConstants() { }
+
+ /** Masks last eight bits */
+ static final int BYTE_MASK = 0xFF;
+
+ /** length of a ZipShort in bytes */
+ static final int SHORT = 2;
+
+ /** length of a ZipLong in bytes */
+ static final int WORD = 4;
+
+ /** length of a ZipEightByteInteger in bytes */
+ static final int DWORD = 8;
+
+ /** Initial ZIP specification version */
+ static final int INITIAL_VERSION = 10;
+
+ /** ZIP specification version that introduced data descriptor method */
+ static final int DATA_DESCRIPTOR_MIN_VERSION = 20;
+
+ /** ZIP specification version that introduced ZIP64 */
+ static final int ZIP64_MIN_VERSION = 45;
+
+ /**
+ * Value stored in two-byte size and similar fields if ZIP64
+ * extensions are used.
+ */
+ static final int ZIP64_MAGIC_SHORT = 0xFFFF;
+
+ /**
+ * Value stored in four-byte size and similar fields if ZIP64
+ * extensions are used.
+ */
+ static final long ZIP64_MAGIC = 0xFFFFFFFFL;
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEightByteInteger.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEightByteInteger.java
new file mode 100644
index 000000000..0803e6508
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEightByteInteger.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.Serializable;
+import java.math.BigInteger;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.BYTE_MASK;
+
+/**
+ * Utility class that represents an eight byte integer with conversion
+ * rules for the little endian byte order of ZIP files.
+ * @Immutable
+ *
+ * @since 1.2
+ */
+public final class ZipEightByteInteger implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private static final int BYTE_1 = 1;
+ private static final int BYTE_1_MASK = 0xFF00;
+ private static final int BYTE_1_SHIFT = 8;
+
+ private static final int BYTE_2 = 2;
+ private static final int BYTE_2_MASK = 0xFF0000;
+ private static final int BYTE_2_SHIFT = 16;
+
+ private static final int BYTE_3 = 3;
+ private static final long BYTE_3_MASK = 0xFF000000L;
+ private static final int BYTE_3_SHIFT = 24;
+
+ private static final int BYTE_4 = 4;
+ private static final long BYTE_4_MASK = 0xFF00000000L;
+ private static final int BYTE_4_SHIFT = 32;
+
+ private static final int BYTE_5 = 5;
+ private static final long BYTE_5_MASK = 0xFF0000000000L;
+ private static final int BYTE_5_SHIFT = 40;
+
+ private static final int BYTE_6 = 6;
+ private static final long BYTE_6_MASK = 0xFF000000000000L;
+ private static final int BYTE_6_SHIFT = 48;
+
+ private static final int BYTE_7 = 7;
+ private static final long BYTE_7_MASK = 0x7F00000000000000L;
+ private static final int BYTE_7_SHIFT = 56;
+
+ private static final int LEFTMOST_BIT_SHIFT = 63;
+ private static final byte LEFTMOST_BIT = (byte) 0x80;
+
+ private final BigInteger value;
+
+ public static final ZipEightByteInteger ZERO = new ZipEightByteInteger(0);
+
+ /**
+ * Create instance from a number.
+ * @param value the long to store as a ZipEightByteInteger
+ */
+ public ZipEightByteInteger(long value) {
+ this(BigInteger.valueOf(value));
+ }
+
+ /**
+ * Create instance from a number.
+ * @param value the BigInteger to store as a ZipEightByteInteger
+ */
+ public ZipEightByteInteger(BigInteger value) {
+ this.value = value;
+ }
+
+ /**
+ * Create instance from bytes.
+ * @param bytes the bytes to store as a ZipEightByteInteger
+ */
+ public ZipEightByteInteger (byte[] bytes) {
+ this(bytes, 0);
+ }
+
+ /**
+ * Create instance from the eight bytes starting at offset.
+ * @param bytes the bytes to store as a ZipEightByteInteger
+ * @param offset the offset to start
+ */
+ public ZipEightByteInteger (byte[] bytes, int offset) {
+ value = ZipEightByteInteger.getValue(bytes, offset);
+ }
+
+ /**
+ * Get value as eight bytes in little endian byte order.
+ * @return value as eight bytes in little endian order
+ */
+ public byte[] getBytes() {
+ return ZipEightByteInteger.getBytes(value);
+ }
+
+ /**
+ * Get value as Java long.
+ * @return value as a long
+ */
+ public long getLongValue() {
+ return value.longValue();
+ }
+
+ /**
+ * Get value as Java BigInteger.
+ * @return value as a BigInteger
+ */
+ public BigInteger getValue() {
+ return value;
+ }
+
+ /**
+ * Get value as eight bytes in little endian byte order.
+ * @param value the value to convert
+ * @return value as eight bytes in little endian byte order
+ */
+ public static byte[] getBytes(long value) {
+ return getBytes(BigInteger.valueOf(value));
+ }
+
+ /**
+ * Get value as eight bytes in little endian byte order.
+ * @param value the value to convert
+ * @return value as eight bytes in little endian byte order
+ */
+ public static byte[] getBytes(BigInteger value) {
+ byte[] result = new byte[8];
+ long val = value.longValue();
+ result[0] = (byte) ((val & BYTE_MASK));
+ result[BYTE_1] = (byte) ((val & BYTE_1_MASK) >> BYTE_1_SHIFT);
+ result[BYTE_2] = (byte) ((val & BYTE_2_MASK) >> BYTE_2_SHIFT);
+ result[BYTE_3] = (byte) ((val & BYTE_3_MASK) >> BYTE_3_SHIFT);
+ result[BYTE_4] = (byte) ((val & BYTE_4_MASK) >> BYTE_4_SHIFT);
+ result[BYTE_5] = (byte) ((val & BYTE_5_MASK) >> BYTE_5_SHIFT);
+ result[BYTE_6] = (byte) ((val & BYTE_6_MASK) >> BYTE_6_SHIFT);
+ result[BYTE_7] = (byte) ((val & BYTE_7_MASK) >> BYTE_7_SHIFT);
+ if (value.testBit(LEFTMOST_BIT_SHIFT)) {
+ result[BYTE_7] |= LEFTMOST_BIT;
+ }
+ return result;
+ }
+
+ /**
+ * Helper method to get the value as a Java long from eight bytes
+ * starting at given array offset
+ * @param bytes the array of bytes
+ * @param offset the offset to start
+ * @return the corresponding Java long value
+ */
+ public static long getLongValue(byte[] bytes, int offset) {
+ return getValue(bytes, offset).longValue();
+ }
+
+ /**
+ * Helper method to get the value as a Java BigInteger from eight
+ * bytes starting at given array offset
+ * @param bytes the array of bytes
+ * @param offset the offset to start
+ * @return the corresponding Java BigInteger value
+ */
+ public static BigInteger getValue(byte[] bytes, int offset) {
+ long value = ((long) bytes[offset + BYTE_7] << BYTE_7_SHIFT) & BYTE_7_MASK;
+ value += ((long) bytes[offset + BYTE_6] << BYTE_6_SHIFT) & BYTE_6_MASK;
+ value += ((long) bytes[offset + BYTE_5] << BYTE_5_SHIFT) & BYTE_5_MASK;
+ value += ((long) bytes[offset + BYTE_4] << BYTE_4_SHIFT) & BYTE_4_MASK;
+ value += ((long) bytes[offset + BYTE_3] << BYTE_3_SHIFT) & BYTE_3_MASK;
+ value += ((long) bytes[offset + BYTE_2] << BYTE_2_SHIFT) & BYTE_2_MASK;
+ value += ((long) bytes[offset + BYTE_1] << BYTE_1_SHIFT) & BYTE_1_MASK;
+ value += ((long) bytes[offset] & BYTE_MASK);
+ BigInteger val = BigInteger.valueOf(value);
+ return (bytes[offset + BYTE_7] & LEFTMOST_BIT) == LEFTMOST_BIT
+ ? val.setBit(LEFTMOST_BIT_SHIFT) : val;
+ }
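+
+ // Round-trip sketch: a value survives getBytes/getValue unchanged,
+ // including values with bit 63 set, which is why BigInteger backs
+ // this class instead of a plain long.
+ //
+ //   byte[] raw = ZipEightByteInteger.getBytes(0xFFFFFFFFL);
+ //   long back = ZipEightByteInteger.getLongValue(raw); // 0xFFFFFFFFL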
+
+ /**
+ * Helper method to get the value as a Java long from an eight-byte array
+ * @param bytes the array of bytes
+ * @return the corresponding Java long value
+ */
+ public static long getLongValue(byte[] bytes) {
+ return getLongValue(bytes, 0);
+ }
+
+ /**
+ * Helper method to get the value as a Java BigInteger from an eight-byte array
+ * @param bytes the array of bytes
+ * @return the corresponding Java BigInteger value
+ */
+ public static BigInteger getValue(byte[] bytes) {
+ return getValue(bytes, 0);
+ }
+
+ /**
+ * Override to make two instances with same value equal.
+ * @param o an object to compare
+ * @return true if the objects are equal
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || !(o instanceof ZipEightByteInteger)) {
+ return false;
+ }
+ return value.equals(((ZipEightByteInteger) o).getValue());
+ }
+
+ /**
+ * Override to make two instances with same value equal.
+ * @return the hashCode of the value stored in the ZipEightByteInteger
+ */
+ @Override
+ public int hashCode() {
+ return value.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "ZipEightByteInteger value: " + value;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEncoding.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEncoding.java
new file mode 100644
index 000000000..65d2044b5
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEncoding.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An interface for encoders that do a pretty encoding of ZIP
+ * filenames.
+ *
+ * There are mostly two implementations, one that uses java.nio
+ * {@link java.nio.charset.Charset Charset} and one that copes
+ * with simple 8 bit charsets, because java-1.4 did not support
+ * Cp437 in java.nio.
+ *
+ * The main reason for defining an own encoding layer comes from
+ * the problems with {@link java.lang.String#getBytes(String)
+ * String.getBytes}, which encodes unknown characters as ASCII
+ * question marks ('?'). Question marks are by definition an
+ * invalid character in filenames on some operating systems like
+ * Windows, which leads to ignored ZIP entries.
+ *
+ * All implementations should implement this interface in a
+ * reentrant way.
+ */
+public interface ZipEncoding {
+ /**
+ * Check whether the given string may be losslessly encoded using this
+ * encoding.
+ *
+ * @param name A filename or ZIP comment.
+ * @return Whether the given name may be encoded without any losses.
+ */
+ boolean canEncode(String name);
+
+ /**
+ * Encode a filename or a comment to a byte array suitable for
+ * storing it to a serialized zip entry.
+ *
+ * <p>Examples for CP 437 (in pseudo-notation, right hand side is
+ * C-style notation):</p>
+ *
+ * <pre>
+ *  encode("\u20AC_for_Dollar.txt") = "%U20AC_for_Dollar.txt"
+ *  encode("\u00D6lf\u00E4sser.txt") = "\231lf\204sser.txt"
+ * </pre>
+ *
+ * @param name A filename or ZIP comment.
+ * @return A byte buffer with a backing array containing the
+ * encoded name. Unmappable characters or malformed
+ * character sequences are mapped to a sequence of utf-16
+ * words encoded in the format %Uxxxx. It is
+ * assumed that the byte buffer is positioned at the
+ * beginning of the encoded result, the byte buffer has a
+ * backing array and the limit of the byte buffer points
+ * to the end of the encoded result.
+ * @throws IOException
+ */
+ ByteBuffer encode(String name) throws IOException;
+
+ /**
+ * @param data The byte values to decode.
+ * @return The decoded string.
+ * @throws IOException
+ */
+ String decode(byte [] data) throws IOException;
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEncodingHelper.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEncodingHelper.java
new file mode 100644
index 000000000..c0f1cfaef
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipEncodingHelper.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.archivers.zip;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.nio.charset.UnsupportedCharsetException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.compress.utils.Charsets;
+
+/**
+ * Static helper functions for robustly encoding filenames in zip files.
+ */
+public abstract class ZipEncodingHelper {
+
+ /**
+ * A class, which holds the high characters of a simple encoding
+ * and lazily instantiates a Simple8BitZipEncoding instance in a
+ * thread-safe manner.
+ */
+ private static class SimpleEncodingHolder {
+
+ private final char [] highChars;
+ private Simple8BitZipEncoding encoding;
+
+ /**
+ * Instantiate a simple encoding holder.
+ *
+ * @param highChars The characters for byte codes 128 to 255.
+ *
+ * @see Simple8BitZipEncoding#Simple8BitZipEncoding(char[])
+ */
+ SimpleEncodingHolder(char [] highChars) {
+ this.highChars = highChars;
+ }
+
+ /**
+ * @return The associated {@link Simple8BitZipEncoding}, which
+ * is instantiated if not done so far.
+ */
+ public synchronized Simple8BitZipEncoding getEncoding() {
+ if (this.encoding == null) {
+ this.encoding = new Simple8BitZipEncoding(this.highChars);
+ }
+ return this.encoding;
+ }
+ }
+
+ private static final Map<String, SimpleEncodingHolder> simpleEncodings;
+
+ static {
+ Map<String, SimpleEncodingHolder> se =
+ new HashMap<String, SimpleEncodingHolder>();
+
+ char[] cp437_high_chars =
+ new char[] { 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0,
+ 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef,
+ 0x00ee, 0x00ec, 0x00c4, 0x00c5, 0x00c9, 0x00e6,
+ 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9,
+ 0x00ff, 0x00d6, 0x00dc, 0x00a2, 0x00a3, 0x00a5,
+ 0x20a7, 0x0192, 0x00e1, 0x00ed, 0x00f3, 0x00fa,
+ 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310,
+ 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb,
+ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561,
+ 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557,
+ 0x255d, 0x255c, 0x255b, 0x2510, 0x2514, 0x2534,
+ 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f,
+ 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550,
+ 0x256c, 0x2567, 0x2568, 0x2564, 0x2565, 0x2559,
+ 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518,
+ 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580,
+ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3,
+ 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4,
+ 0x221e, 0x03c6, 0x03b5, 0x2229, 0x2261, 0x00b1,
+ 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
+ 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2,
+ 0x25a0, 0x00a0 };
+
+ SimpleEncodingHolder cp437 = new SimpleEncodingHolder(cp437_high_chars);
+
+ se.put("CP437", cp437);
+ se.put("Cp437", cp437);
+ se.put("cp437", cp437);
+ se.put("IBM437", cp437);
+ se.put("ibm437", cp437);
+
+ char[] cp850_high_chars =
+ new char[] { 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0,
+ 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef,
+ 0x00ee, 0x00ec, 0x00c4, 0x00c5, 0x00c9, 0x00e6,
+ 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9,
+ 0x00ff, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8,
+ 0x00d7, 0x0192, 0x00e1, 0x00ed, 0x00f3, 0x00fa,
+ 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x00ae,
+ 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb,
+ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00c1,
+ 0x00c2, 0x00c0, 0x00a9, 0x2563, 0x2551, 0x2557,
+ 0x255d, 0x00a2, 0x00a5, 0x2510, 0x2514, 0x2534,
+ 0x252c, 0x251c, 0x2500, 0x253c, 0x00e3, 0x00c3,
+ 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550,
+ 0x256c, 0x00a4, 0x00f0, 0x00d0, 0x00ca, 0x00cb,
+ 0x00c8, 0x0131, 0x00cd, 0x00ce, 0x00cf, 0x2518,
+ 0x250c, 0x2588, 0x2584, 0x00a6, 0x00cc, 0x2580,
+ 0x00d3, 0x00df, 0x00d4, 0x00d2, 0x00f5, 0x00d5,
+ 0x00b5, 0x00fe, 0x00de, 0x00da, 0x00db, 0x00d9,
+ 0x00fd, 0x00dd, 0x00af, 0x00b4, 0x00ad, 0x00b1,
+ 0x2017, 0x00be, 0x00b6, 0x00a7, 0x00f7, 0x00b8,
+ 0x00b0, 0x00a8, 0x00b7, 0x00b9, 0x00b3, 0x00b2,
+ 0x25a0, 0x00a0 };
+
+ SimpleEncodingHolder cp850 = new SimpleEncodingHolder(cp850_high_chars);
+
+ se.put("CP850", cp850);
+ se.put("Cp850", cp850);
+ se.put("cp850", cp850);
+ se.put("IBM850", cp850);
+ se.put("ibm850", cp850);
+ simpleEncodings = Collections.unmodifiableMap(se);
+ }
+
+ /**
+ * Grow a byte buffer to at least the requested new capacity, or to
+ * double the capacity of the original buffer, whichever is larger.
+ *
+ * @param b The original buffer.
+ * @param newCapacity The minimal requested new capacity.
+ * @return A byte buffer r with
+ * r.capacity() = max(b.capacity()*2,newCapacity) and
+ * all the data contained in b copied to the beginning
+ * of r.
+ *
+ */
+ static ByteBuffer growBuffer(ByteBuffer b, int newCapacity) {
+ b.limit(b.position());
+ b.rewind();
+
+ int c2 = b.capacity() * 2;
+ ByteBuffer on = ByteBuffer.allocate(c2 < newCapacity ? newCapacity : c2);
+
+ on.put(b);
+ return on;
+ }
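+
+ // Sketch: for a 4-byte buffer growBuffer(b, 10) allocates
+ // max(4 * 2, 10) = 10 bytes; for a 16-byte buffer it allocates
+ // max(16 * 2, 10) = 32 bytes, always copying the old content first.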
+
+
+ /**
+ * The hexadecimal digits 0,...,9,A,...,F encoded as
+ * ASCII bytes.
+ */
+ private static final byte[] HEX_DIGITS =
+ new byte [] {
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x41,
+ 0x42, 0x43, 0x44, 0x45, 0x46
+ };
+
+ /**
+ * Append %Uxxxx to the given byte buffer.
+ * The caller must ensure that bb.remaining() >= 6.
+ *
+ * @param bb The byte buffer to write to.
+ * @param c The character to write.
+ */
+ static void appendSurrogate(ByteBuffer bb, char c) {
+
+ bb.put((byte) '%');
+ bb.put((byte) 'U');
+
+ bb.put(HEX_DIGITS[(c >> 12)&0x0f]);
+ bb.put(HEX_DIGITS[(c >> 8)&0x0f]);
+ bb.put(HEX_DIGITS[(c >> 4)&0x0f]);
+ bb.put(HEX_DIGITS[c & 0x0f]);
+ }
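+
+ // Sketch: appendSurrogate(bb, '\u20AC') writes the six ASCII bytes
+ // "%U20AC" into bb; this is the escape format referenced by
+ // ZipEncoding#encode for unmappable characters.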
+
+
+ /**
+ * name of the encoding UTF-8
+ */
+ static final String UTF8 = "UTF8";
+
+ /**
+ * name of the encoding UTF-8
+ */
+ static final ZipEncoding UTF8_ZIP_ENCODING = new FallbackZipEncoding(UTF8);
+
+ /**
+ * Instantiates a zip encoding.
+ *
+ * @param name The name of the zip encoding. Specify {@code null} for
+ * the platform's default encoding.
+ * @return A zip encoding for the given encoding name.
+ */
+ public static ZipEncoding getZipEncoding(String name) {
+
+ // fallback encoding is good enough for UTF-8.
+ if (isUTF8(name)) {
+ return UTF8_ZIP_ENCODING;
+ }
+
+ if (name == null) {
+ return new FallbackZipEncoding();
+ }
+
+ SimpleEncodingHolder h = simpleEncodings.get(name);
+
+ if (h != null) {
+ return h.getEncoding();
+ }
+
+ try {
+
+ Charset cs = Charset.forName(name);
+ return new NioZipEncoding(cs);
+
+ } catch (UnsupportedCharsetException e) {
+ return new FallbackZipEncoding(name);
+ }
+ }
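+
+ // Usage sketch, reusing the CP 437 example from ZipEncoding's Javadoc:
+ //
+ //   ZipEncoding enc = ZipEncodingHelper.getZipEncoding("CP437");
+ //   ByteBuffer b = enc.encode("\u00D6lf\u00E4sser.txt"); // "\231lf\204sser.txt"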
+
+ /**
+ * Returns whether a given encoding is UTF-8. If the given name is
+ * null, the platform's default encoding is checked.
+ *
+ * @param charsetName the name of the charset to check, or null to
+ * check the platform's default encoding.
+ */
+ static boolean isUTF8(String charsetName) {
+ if (charsetName == null) {
+ // check platform's default encoding
+ charsetName = System.getProperty("file.encoding");
+ }
+ if (Charsets.UTF_8.name().equalsIgnoreCase(charsetName)) {
+ return true;
+ }
+ for (String alias : Charsets.UTF_8.aliases()) {
+ if (alias.equalsIgnoreCase(charsetName)) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipExtraField.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipExtraField.java
new file mode 100644
index 000000000..07d64a189
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipExtraField.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.zip.ZipException;
+
+/**
+ * General format of extra field data.
+ *
+ * Extra fields usually appear twice per file, once in the local
+ * file data and once in the central directory. Usually they are the
+ * same, but they don't have to be. {@link
+ * java.util.zip.ZipOutputStream java.util.zip.ZipOutputStream} will
+ * only use the local file data in both places.
+ *
+ */
+public interface ZipExtraField {
+ /**
+ * The Header-ID.
+ *
+ * @return The HeaderId value
+ */
+ ZipShort getHeaderId();
+
+ /**
+ * Length of the extra field in the local file data - without
+ * Header-ID or length specifier.
+ * @return the length of the field in the local file data
+ */
+ ZipShort getLocalFileDataLength();
+
+ /**
+ * Length of the extra field in the central directory - without
+ * Header-ID or length specifier.
+ * @return the length of the field in the central directory
+ */
+ ZipShort getCentralDirectoryLength();
+
+ /**
+ * The actual data to put into local file data - without Header-ID
+ * or length specifier.
+ * @return the data
+ */
+ byte[] getLocalFileDataData();
+
+ /**
+ * The actual data to put into central directory - without Header-ID or
+ * length specifier.
+ * @return the data
+ */
+ byte[] getCentralDirectoryData();
+
+ /**
+ * Populate data from this array as if it was in local file data.
+ *
+ * @param buffer the buffer to read data from
+ * @param offset offset into buffer to read data
+ * @param length the length of data
+ * @exception ZipException on error
+ */
+ void parseFromLocalFileData(byte[] buffer, int offset, int length)
+ throws ZipException;
+
+ /**
+ * Populate data from this array as if it was in central directory data.
+ *
+ * @param buffer the buffer to read data from
+ * @param offset offset into buffer to read data
+ * @param length the length of data
+ * @exception ZipException on error
+ */
+ void parseFromCentralDirectoryData(byte[] buffer, int offset, int length)
+ throws ZipException;
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipFile.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipFile.java
new file mode 100644
index 000000000..bcde9c841
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipFile.java
@@ -0,0 +1,1083 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.BufferedInputStream;
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.zip.Inflater;
+import java.util.zip.InflaterInputStream;
+import java.util.zip.ZipException;
+
+import org.apache.commons.compress.utils.IOUtils;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
+
+/**
+ * Replacement for java.util.ZipFile.
+ *
+ * This class adds support for file name encodings other than UTF-8
+ * (which is required to work on ZIP files created by native zip
+ * tools) and is able to skip a preamble like the one found in self
+ * extracting archives. Furthermore it returns instances of
+ * org.apache.commons.compress.archivers.zip.ZipArchiveEntry
+ * instead of java.util.zip.ZipEntry.
+ *
+ * It doesn't extend java.util.zip.ZipFile as it would
+ * have to reimplement all methods anyway. Like
+ * java.util.ZipFile, it uses RandomAccessFile under the
+ * covers and supports compressed and uncompressed entries. As of
+ * Apache Commons Compress 1.3 it also transparently supports Zip64
+ * extensions and thus individual entries and archives larger than 4
+ * GB or with more than 65536 entries.
+ *
+ * The method signatures mimic the ones of
+ * java.util.zip.ZipFile, with a couple of exceptions:
+ *
+ * <ul>
+ * <li>There is no getName method.</li>
+ * <li>entries has been renamed to getEntries.</li>
+ * <li>getEntries and getEntry return
+ * org.apache.commons.compress.archivers.zip.ZipArchiveEntry
+ * instances.</li>
+ * <li>close is allowed to throw IOException.</li>
+ * </ul>
+ *
+ */
+public class ZipFile implements Closeable {
+ private static final int HASH_SIZE = 509;
+ static final int NIBLET_MASK = 0x0f;
+ static final int BYTE_SHIFT = 8;
+ private static final int POS_0 = 0;
+ private static final int POS_1 = 1;
+ private static final int POS_2 = 2;
+ private static final int POS_3 = 3;
+
+ /**
+ * List of entries in the order they appear inside the central
+ * directory.
+ */
+ private final List<ZipArchiveEntry> entries =
+ new LinkedList<ZipArchiveEntry>();
+
+ /**
+ * Maps String to list of ZipArchiveEntrys, name -> actual entries.
+ */
+ private final Map<String, LinkedList<ZipArchiveEntry>> nameMap =
+ new HashMap<String, LinkedList<ZipArchiveEntry>>(HASH_SIZE);
+
+ private static final class OffsetEntry {
+ private long headerOffset = -1;
+ private long dataOffset = -1;
+ }
+
+ /**
+ * The encoding to use for filenames and the file comment.
+ *
+ * For a list of possible values see http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html.
+ * Defaults to UTF-8.
+ */
+ private final String encoding;
+
+ /**
+ * The zip encoding to use for filenames and the file comment.
+ */
+ private final ZipEncoding zipEncoding;
+
+ /**
+ * File name of actual source.
+ */
+ private final String archiveName;
+
+ /**
+ * The actual data source.
+ */
+ private final RandomAccessFile archive;
+
+ /**
+ * Whether to look for and use Unicode extra fields.
+ */
+ private final boolean useUnicodeExtraFields;
+
+ /**
+ * Whether the file is closed.
+ */
+ private boolean closed;
+
+ // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
+ private final byte[] DWORD_BUF = new byte[DWORD];
+ private final byte[] WORD_BUF = new byte[WORD];
+ private final byte[] CFH_BUF = new byte[CFH_LEN];
+ private final byte[] SHORT_BUF = new byte[SHORT];
+
+ /**
+ * Opens the given file for reading, assuming "UTF8" for file names.
+ *
+ * @param f the archive.
+ *
+ * @throws IOException if an error occurs while reading the file.
+ */
+ public ZipFile(File f) throws IOException {
+ this(f, ZipEncodingHelper.UTF8);
+ }
+
+ /**
+ * Opens the given file for reading, assuming "UTF8".
+ *
+ * @param name name of the archive.
+ *
+ * @throws IOException if an error occurs while reading the file.
+ */
+ public ZipFile(String name) throws IOException {
+ this(new File(name), ZipEncodingHelper.UTF8);
+ }
+
+ /**
+ * Opens the given file for reading, assuming the specified
+ * encoding for file names, scanning unicode extra fields.
+ *
+ * @param name name of the archive.
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ *
+ * @throws IOException if an error occurs while reading the file.
+ */
+ public ZipFile(String name, String encoding) throws IOException {
+ this(new File(name), encoding, true);
+ }
+
+ /**
+ * Opens the given file for reading, assuming the specified
+ * encoding for file names and scanning for unicode extra fields.
+ *
+ * @param f the archive.
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ *
+ * @throws IOException if an error occurs while reading the file.
+ */
+ public ZipFile(File f, String encoding) throws IOException {
+ this(f, encoding, true);
+ }
+
+ /**
+ * Opens the given file for reading, assuming the specified
+ * encoding for file names.
+ *
+ * @param f the archive.
+ * @param encoding the encoding to use for file names, use null
+ * for the platform's default encoding
+ * @param useUnicodeExtraFields whether to use InfoZIP Unicode
+ * Extra Fields (if present) to set the file names.
+ *
+ * @throws IOException if an error occurs while reading the file.
+ */
+ public ZipFile(File f, String encoding, boolean useUnicodeExtraFields)
+ throws IOException {
+ this.archiveName = f.getAbsolutePath();
+ this.encoding = encoding;
+ this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
+ this.useUnicodeExtraFields = useUnicodeExtraFields;
+ archive = new RandomAccessFile(f, "r");
+ boolean success = false;
+ try {
+ Map<ZipArchiveEntry, NameAndComment> entriesWithoutUTF8Flag =
+ populateFromCentralDirectory();
+ resolveLocalFileHeaderData(entriesWithoutUTF8Flag);
+ success = true;
+ } finally {
+ if (!success) {
+ closed = true;
+ IOUtils.closeQuietly(archive);
+ }
+ }
+ }
+
+ /**
+ * The encoding to use for filenames and the file comment.
+ *
+ * @return null if using the platform's default character encoding.
+ */
+ public String getEncoding() {
+ return encoding;
+ }
+
+ /**
+ * Closes the archive.
+ * @throws IOException if an error occurs closing the archive.
+ */
+ public void close() throws IOException {
+ // this flag is only written here and read in finalize() which
+ // can never be run in parallel.
+ // no synchronization needed.
+ closed = true;
+
+ archive.close();
+ }
+
+ /**
+ * Closes a ZipFile quietly; throws no IO exception and does
+ * nothing on a null parameter.
+ * @param zipfile file to close, can be null
+ */
+ public static void closeQuietly(ZipFile zipfile) {
+ IOUtils.closeQuietly(zipfile);
+ }
+
+ /**
+ * Returns all entries.
+ *
+ * Entries will be returned in the same order they appear
+ * within the archive's central directory.
+ *
+ * @return all entries as {@link ZipArchiveEntry} instances
+ */
+ public Enumeration<ZipArchiveEntry> getEntries() {
+ return Collections.enumeration(entries);
+ }
+
+ /**
+ * Returns all entries in physical order.
+ *
+ * Entries will be returned in the same order their contents
+ * appear within the archive.
+ *
+ * @return all entries as {@link ZipArchiveEntry} instances
+ *
+ * @since 1.1
+ */
+ public Enumeration<ZipArchiveEntry> getEntriesInPhysicalOrder() {
+ ZipArchiveEntry[] allEntries = entries.toArray(new ZipArchiveEntry[0]);
+ Arrays.sort(allEntries, OFFSET_COMPARATOR);
+ return Collections.enumeration(Arrays.asList(allEntries));
+ }
+
+ /**
+ * Returns a named entry - or {@code null} if no entry by
+ * that name exists.
+ *
+ * If multiple entries with the same name exist the first entry
+ * in the archive's central directory by that name is
+ * returned.
+ *
+ * @param name name of the entry.
+ * @return the ZipArchiveEntry corresponding to the given name - or
+ * {@code null} if not present.
+ */
+ public ZipArchiveEntry getEntry(String name) {
+ LinkedList<ZipArchiveEntry> entriesOfThatName = nameMap.get(name);
+ return entriesOfThatName != null ? entriesOfThatName.getFirst() : null;
+ }
+
+ /**
+ * Returns all named entries in the same order they appear within
+ * the archive's central directory.
+ *
+ * @param name name of the entry.
+ * @return the Iterable<ZipArchiveEntry> corresponding to the
+ * given name
+ * @since 1.6
+ */
+ public Iterable<ZipArchiveEntry> getEntries(String name) {
+ List<ZipArchiveEntry> entriesOfThatName = nameMap.get(name);
+ return entriesOfThatName != null ? entriesOfThatName
+ : Collections.<ZipArchiveEntry>emptyList();
+ }
+
+ /**
+ * Returns all named entries in the same order their contents
+ * appear within the archive.
+ *
+ * @param name name of the entry.
+ * @return the Iterable<ZipArchiveEntry> corresponding to the
+ * given name
+ * @since 1.6
+ */
+ public Iterable<ZipArchiveEntry> getEntriesInPhysicalOrder(String name) {
+ ZipArchiveEntry[] entriesOfThatName = new ZipArchiveEntry[0];
+ if (nameMap.containsKey(name)) {
+ entriesOfThatName = nameMap.get(name).toArray(entriesOfThatName);
+ Arrays.sort(entriesOfThatName, OFFSET_COMPARATOR);
+ }
+ return Arrays.asList(entriesOfThatName);
+ }
+
+ /**
+ * Whether this class is able to read the given entry.
+ *
+ * May return false if it is set up to use encryption or a
+ * compression method that hasn't been implemented yet.
+ * @since 1.1
+ */
+ public boolean canReadEntryData(ZipArchiveEntry ze) {
+ return ZipUtil.canHandleEntryData(ze);
+ }
+
+ /**
+ * Returns an InputStream for reading the contents of the given entry.
+ *
+ * @param ze the entry to get the stream for.
+ * @return a stream to read the entry from.
+ * @throws IOException if unable to create an input stream from the zipentry
+ * @throws ZipException if the zipentry uses an unsupported feature
+ */
+ public InputStream getInputStream(ZipArchiveEntry ze)
+ throws IOException, ZipException {
+ if (!(ze instanceof Entry)) {
+ return null;
+ }
+ // cast validity is checked just above
+ OffsetEntry offsetEntry = ((Entry) ze).getOffsetEntry();
+ ZipUtil.checkRequestedFeatures(ze);
+ long start = offsetEntry.dataOffset;
+ BoundedInputStream bis =
+ new BoundedInputStream(start, ze.getCompressedSize());
+ switch (ZipMethod.getMethodByCode(ze.getMethod())) {
+ case STORED:
+ return bis;
+ case UNSHRINKING:
+ return new UnshrinkingInputStream(bis);
+ case IMPLODING:
+ return new ExplodingInputStream(ze.getGeneralPurposeBit().getSlidingDictionarySize(),
+ ze.getGeneralPurposeBit().getNumberOfShannonFanoTrees(), new BufferedInputStream(bis));
+ case DEFLATED:
+ bis.addDummy();
+ final Inflater inflater = new Inflater(true);
+ return new InflaterInputStream(bis, inflater) {
+ @Override
+ public void close() throws IOException {
+ super.close();
+ inflater.end();
+ }
+ };
+ default:
+ throw new ZipException("Found unsupported compression method "
+ + ze.getMethod());
+ }
+ }
+
+ /**
+ * Convenience method to return the entry's content as a String if
+ * isUnixSymlink() returns true for it, otherwise returns null.
+ *
+ * This method assumes the symbolic link's file name uses the
+ * same encoding that has been specified for this ZipFile.
+ *
+ * @param entry ZipArchiveEntry object that represents the symbolic link
+ * @return entry's content as a String
+ * @throws IOException problem with content's input stream
+ * @since 1.5
+ */
+ public String getUnixSymlink(ZipArchiveEntry entry) throws IOException {
+ if (entry != null && entry.isUnixSymlink()) {
+ InputStream in = null;
+ try {
+ in = getInputStream(entry);
+ byte[] symlinkBytes = IOUtils.toByteArray(in);
+ return zipEncoding.decode(symlinkBytes);
+ } finally {
+ if (in != null) {
+ in.close();
+ }
+ }
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Ensures that the close method of this zipfile is called when
+ * there are no more references to it.
+ * @see #close()
+ */
+ @Override
+ protected void finalize() throws Throwable {
+ try {
+ if (!closed) {
+ System.err.println("Cleaning up unclosed ZipFile for archive "
+ + archiveName);
+ close();
+ }
+ } finally {
+ super.finalize();
+ }
+ }
+
+ /**
+ * Length of a "central directory" entry structure without file
+ * name, extra fields or comment.
+ */
+ private static final int CFH_LEN =
+ /* version made by */ SHORT
+ /* version needed to extract */ + SHORT
+ /* general purpose bit flag */ + SHORT
+ /* compression method */ + SHORT
+ /* last mod file time */ + SHORT
+ /* last mod file date */ + SHORT
+ /* crc-32 */ + WORD
+ /* compressed size */ + WORD
+ /* uncompressed size */ + WORD
+ /* filename length */ + SHORT
+ /* extra field length */ + SHORT
+ /* file comment length */ + SHORT
+ /* disk number start */ + SHORT
+ /* internal file attributes */ + SHORT
+ /* external file attributes */ + WORD
+ /* relative offset of local header */ + WORD;
+
+ private static final long CFH_SIG =
+ ZipLong.getValue(ZipArchiveOutputStream.CFH_SIG);
+
+ /**
+ * Reads the central directory of the given archive and populates
+ * the internal tables with ZipArchiveEntry instances.
+ *
+ * The ZipArchiveEntrys will know all data that can be obtained from
+ * the central directory alone, but not the data that requires the
+ * local file header or additional data to be read.
+ *
+ * @return a map of zipentries that didn't have the language
+ * encoding flag set when read.
+ */
+ private Map<ZipArchiveEntry, NameAndComment> populateFromCentralDirectory()
+ throws IOException {
+ HashMap<ZipArchiveEntry, NameAndComment> noUTF8Flag =
+ new HashMap<ZipArchiveEntry, NameAndComment>();
+
+ positionAtCentralDirectory();
+
+ archive.readFully(WORD_BUF);
+ long sig = ZipLong.getValue(WORD_BUF);
+
+ if (sig != CFH_SIG && startsWithLocalFileHeader()) {
+ throw new IOException("central directory is empty, can't expand"
+ + " corrupt archive.");
+ }
+
+ while (sig == CFH_SIG) {
+ readCentralDirectoryEntry(noUTF8Flag);
+ archive.readFully(WORD_BUF);
+ sig = ZipLong.getValue(WORD_BUF);
+ }
+ return noUTF8Flag;
+ }
+
+ /**
+ * Reads an individual entry of the central directory, creates a
+ * ZipArchiveEntry from it and adds it to the global maps.
+ *
+ * @param noUTF8Flag map used to collect entries that don't have
+ * their UTF-8 flag set and whose name will be set by data read
+ * from the local file header later. The current entry may be
+ * added to this map.
+ */
+ private void
+ readCentralDirectoryEntry(Map<ZipArchiveEntry, NameAndComment> noUTF8Flag)
+ throws IOException {
+ archive.readFully(CFH_BUF);
+ int off = 0;
+ OffsetEntry offset = new OffsetEntry();
+ Entry ze = new Entry(offset);
+
+ int versionMadeBy = ZipShort.getValue(CFH_BUF, off);
+ off += SHORT;
+ ze.setPlatform((versionMadeBy >> BYTE_SHIFT) & NIBLET_MASK);
+
+ off += SHORT; // skip version info
+
+ final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(CFH_BUF, off);
+ final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames();
+ final ZipEncoding entryEncoding =
+ hasUTF8Flag ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
+ ze.setGeneralPurposeBit(gpFlag);
+
+ off += SHORT;
+
+ ze.setMethod(ZipShort.getValue(CFH_BUF, off));
+ off += SHORT;
+
+ long time = ZipUtil.dosToJavaTime(ZipLong.getValue(CFH_BUF, off));
+ ze.setTime(time);
+ off += WORD;
+
+ ze.setCrc(ZipLong.getValue(CFH_BUF, off));
+ off += WORD;
+
+ ze.setCompressedSize(ZipLong.getValue(CFH_BUF, off));
+ off += WORD;
+
+ ze.setSize(ZipLong.getValue(CFH_BUF, off));
+ off += WORD;
+
+ int fileNameLen = ZipShort.getValue(CFH_BUF, off);
+ off += SHORT;
+
+ int extraLen = ZipShort.getValue(CFH_BUF, off);
+ off += SHORT;
+
+ int commentLen = ZipShort.getValue(CFH_BUF, off);
+ off += SHORT;
+
+ int diskStart = ZipShort.getValue(CFH_BUF, off);
+ off += SHORT;
+
+ ze.setInternalAttributes(ZipShort.getValue(CFH_BUF, off));
+ off += SHORT;
+
+ ze.setExternalAttributes(ZipLong.getValue(CFH_BUF, off));
+ off += WORD;
+
+ byte[] fileName = new byte[fileNameLen];
+ archive.readFully(fileName);
+ ze.setName(entryEncoding.decode(fileName), fileName);
+
+ // LFH offset,
+ offset.headerOffset = ZipLong.getValue(CFH_BUF, off);
+ // data offset will be filled later
+ entries.add(ze);
+
+ byte[] cdExtraData = new byte[extraLen];
+ archive.readFully(cdExtraData);
+ ze.setCentralDirectoryExtra(cdExtraData);
+
+ setSizesAndOffsetFromZip64Extra(ze, offset, diskStart);
+
+ byte[] comment = new byte[commentLen];
+ archive.readFully(comment);
+ ze.setComment(entryEncoding.decode(comment));
+
+ if (!hasUTF8Flag && useUnicodeExtraFields) {
+ noUTF8Flag.put(ze, new NameAndComment(fileName, comment));
+ }
+ }
+
+ /**
+ * If the entry holds a Zip64 extended information extra field,
+ * read sizes from there if the entry's sizes are set to
+ * 0xFFFFFFFF, and do the same for the offset of the local file
+ * header.
+ *
+ * Ensures the Zip64 extra either knows both compressed and
+ * uncompressed size or neither of them, as the internal logic in
+ * ExtraFieldUtils forces the field to create local header data
+ * even if they are never used - and here a field with only one
+ * size would be invalid.
+ */
+ private void setSizesAndOffsetFromZip64Extra(ZipArchiveEntry ze,
+ OffsetEntry offset,
+ int diskStart)
+ throws IOException {
+ Zip64ExtendedInformationExtraField z64 =
+ (Zip64ExtendedInformationExtraField)
+ ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
+ if (z64 != null) {
+ boolean hasUncompressedSize = ze.getSize() == ZIP64_MAGIC;
+ boolean hasCompressedSize = ze.getCompressedSize() == ZIP64_MAGIC;
+ boolean hasRelativeHeaderOffset =
+ offset.headerOffset == ZIP64_MAGIC;
+ z64.reparseCentralDirectoryData(hasUncompressedSize,
+ hasCompressedSize,
+ hasRelativeHeaderOffset,
+ diskStart == ZIP64_MAGIC_SHORT);
+
+ if (hasUncompressedSize) {
+ ze.setSize(z64.getSize().getLongValue());
+ } else if (hasCompressedSize) {
+ z64.setSize(new ZipEightByteInteger(ze.getSize()));
+ }
+
+ if (hasCompressedSize) {
+ ze.setCompressedSize(z64.getCompressedSize().getLongValue());
+ } else if (hasUncompressedSize) {
+ z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
+ }
+
+ if (hasRelativeHeaderOffset) {
+ offset.headerOffset =
+ z64.getRelativeHeaderOffset().getLongValue();
+ }
+ }
+ }
+
+ /**
+ * Length of the "End of central directory record" - which is
+ * supposed to be the last structure of the archive - without file
+ * comment.
+ */
+ static final int MIN_EOCD_SIZE =
+ /* end of central dir signature */ WORD
+ /* number of this disk */ + SHORT
+ /* number of the disk with the */
+ /* start of the central directory */ + SHORT
+ /* total number of entries in */
+ /* the central dir on this disk */ + SHORT
+ /* total number of entries in */
+ /* the central dir */ + SHORT
+ /* size of the central directory */ + WORD
+ /* offset of start of central */
+ /* directory with respect to */
+ /* the starting disk number */ + WORD
+ /* zipfile comment length */ + SHORT;
+
+ /**
+ * Maximum length of the "End of central directory record" with a
+ * file comment.
+ */
+ private static final int MAX_EOCD_SIZE = MIN_EOCD_SIZE
+ /* maximum length of zipfile comment */ + ZIP64_MAGIC_SHORT;
+
+ /**
+ * Offset of the field that holds the location of the first
+ * central directory entry inside the "End of central directory
+ * record" relative to the start of the "End of central directory
+ * record".
+ */
+ private static final int CFD_LOCATOR_OFFSET =
+ /* end of central dir signature */ WORD
+ /* number of this disk */ + SHORT
+ /* number of the disk with the */
+ /* start of the central directory */ + SHORT
+ /* total number of entries in */
+ /* the central dir on this disk */ + SHORT
+ /* total number of entries in */
+ /* the central dir */ + SHORT
+ /* size of the central directory */ + WORD;
+
+ /**
+ * Length of the "Zip64 end of central directory locator" - which
+ * should be right in front of the "end of central directory
+ * record" if one is present at all.
+ */
+ private static final int ZIP64_EOCDL_LENGTH =
+ /* zip64 end of central dir locator sig */ WORD
+ /* number of the disk with the start */
+ /* start of the zip64 end of */
+ /* central directory */ + WORD
+ /* relative offset of the zip64 */
+ /* end of central directory record */ + DWORD
+ /* total number of disks */ + WORD;
+
+ /**
+ * Offset of the field that holds the location of the "Zip64 end
+ * of central directory record" inside the "Zip64 end of central
+ * directory locator" relative to the start of the "Zip64 end of
+ * central directory locator".
+ */
+ private static final int ZIP64_EOCDL_LOCATOR_OFFSET =
+ /* zip64 end of central dir locator sig */ WORD
+ /* number of the disk with the start */
+ /* start of the zip64 end of */
+ /* central directory */ + WORD;
+
+ /**
+ * Offset of the field that holds the location of the first
+ * central directory entry inside the "Zip64 end of central
+ * directory record" relative to the start of the "Zip64 end of
+ * central directory record".
+ */
+ private static final int ZIP64_EOCD_CFD_LOCATOR_OFFSET =
+ /* zip64 end of central dir */
+ /* signature */ WORD
+ /* size of zip64 end of central */
+ /* directory record */ + DWORD
+ /* version made by */ + SHORT
+ /* version needed to extract */ + SHORT
+ /* number of this disk */ + WORD
+ /* number of the disk with the */
+ /* start of the central directory */ + WORD
+ /* total number of entries in the */
+ /* central directory on this disk */ + DWORD
+ /* total number of entries in the */
+ /* central directory */ + DWORD
+ /* size of the central directory */ + DWORD;
+
+ /**
+ * Searches for either the "Zip64 end of central directory
+ * locator" or the "End of central dir record", parses
+ * it and positions the stream at the first central directory
+ * record.
+ */
+ private void positionAtCentralDirectory()
+ throws IOException {
+ positionAtEndOfCentralDirectoryRecord();
+ boolean found = false;
+ boolean searchedForZip64EOCD =
+ archive.getFilePointer() > ZIP64_EOCDL_LENGTH;
+ if (searchedForZip64EOCD) {
+ archive.seek(archive.getFilePointer() - ZIP64_EOCDL_LENGTH);
+ archive.readFully(WORD_BUF);
+ found = Arrays.equals(ZipArchiveOutputStream.ZIP64_EOCD_LOC_SIG,
+ WORD_BUF);
+ }
+ if (!found) {
+ // not a ZIP64 archive
+ if (searchedForZip64EOCD) {
+ skipBytes(ZIP64_EOCDL_LENGTH - WORD);
+ }
+ positionAtCentralDirectory32();
+ } else {
+ positionAtCentralDirectory64();
+ }
+ }
+
+ /**
+ * Parses the "Zip64 end of central directory locator",
+ * finds the "Zip64 end of central directory record" using the
+ * parsed information, parses that and positions the stream at the
+ * first central directory record.
+ *
+ * Expects stream to be positioned right behind the "Zip64
+ * end of central directory locator"'s signature.
+ */
+ private void positionAtCentralDirectory64()
+ throws IOException {
+ skipBytes(ZIP64_EOCDL_LOCATOR_OFFSET
+ - WORD /* signature has already been read */);
+ archive.readFully(DWORD_BUF);
+ archive.seek(ZipEightByteInteger.getLongValue(DWORD_BUF));
+ archive.readFully(WORD_BUF);
+ if (!Arrays.equals(WORD_BUF, ZipArchiveOutputStream.ZIP64_EOCD_SIG)) {
+ throw new ZipException("archive's ZIP64 end of central "
+ + "directory locator is corrupt.");
+ }
+ skipBytes(ZIP64_EOCD_CFD_LOCATOR_OFFSET
+ - WORD /* signature has already been read */);
+ archive.readFully(DWORD_BUF);
+ archive.seek(ZipEightByteInteger.getLongValue(DWORD_BUF));
+ }
+
+ /**
+ * Parses the "End of central dir record" and positions
+ * the stream at the first central directory record.
+ *
+ * Expects stream to be positioned at the beginning of the
+ * "End of central dir record".
+ */
+ private void positionAtCentralDirectory32()
+ throws IOException {
+ skipBytes(CFD_LOCATOR_OFFSET);
+ archive.readFully(WORD_BUF);
+ archive.seek(ZipLong.getValue(WORD_BUF));
+ }
+
+ /**
+ * Searches for the "End of central dir record" and positions the
+ * stream at its start.
+ */
+ private void positionAtEndOfCentralDirectoryRecord()
+ throws IOException {
+ boolean found = tryToLocateSignature(MIN_EOCD_SIZE, MAX_EOCD_SIZE,
+ ZipArchiveOutputStream.EOCD_SIG);
+ if (!found) {
+ throw new ZipException("archive is not a ZIP archive");
+ }
+ }
+
+ /**
+ * Searches the archive backwards from minDistance to maxDistance
+ * for the given signature, positions the RandomAccessFile right
+ * at the signature if it has been found.
+ */
+ private boolean tryToLocateSignature(long minDistanceFromEnd,
+ long maxDistanceFromEnd,
+ byte[] sig) throws IOException {
+ boolean found = false;
+ long off = archive.length() - minDistanceFromEnd;
+ final long stopSearching =
+ Math.max(0L, archive.length() - maxDistanceFromEnd);
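+ // scan backwards, starting at the latest offset the signature
+ // could occupy and stopping at the earliest one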
+ if (off >= 0) {
+ for (; off >= stopSearching; off--) {
+ archive.seek(off);
+ int curr = archive.read();
+ if (curr == -1) {
+ break;
+ }
+ if (curr == sig[POS_0]) {
+ curr = archive.read();
+ if (curr == sig[POS_1]) {
+ curr = archive.read();
+ if (curr == sig[POS_2]) {
+ curr = archive.read();
+ if (curr == sig[POS_3]) {
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (found) {
+ archive.seek(off);
+ }
+ return found;
+ }
+
+ /**
+ * Skips the given number of bytes or throws an EOFException if
+ * skipping failed.
+ */
+ private void skipBytes(final int count) throws IOException {
+ int totalSkipped = 0;
+ while (totalSkipped < count) {
+ int skippedNow = archive.skipBytes(count - totalSkipped);
+ if (skippedNow <= 0) {
+ throw new EOFException();
+ }
+ totalSkipped += skippedNow;
+ }
+ }
+
+ /**
+ * Number of bytes in local file header up to the "length of
+ * filename" entry.
+ */
+ private static final long LFH_OFFSET_FOR_FILENAME_LENGTH =
+ /* local file header signature */ WORD
+ /* version needed to extract */ + SHORT
+ /* general purpose bit flag */ + SHORT
+ /* compression method */ + SHORT
+ /* last mod file time */ + SHORT
+ /* last mod file date */ + SHORT
+ /* crc-32 */ + WORD
+ /* compressed size */ + WORD
+ /* uncompressed size */ + WORD;
+
+ /**
+ * Walks through all recorded entries and adds the data available
+ * from the local file header.
+ *
+ * Also records the offsets for the data to read from the
+ * entries.
+ */
+ private void resolveLocalFileHeaderData(Map<ZipArchiveEntry, NameAndComment>
+ entriesWithoutUTF8Flag)
+ throws IOException {
+ for (ZipArchiveEntry zipArchiveEntry : entries) {
+ // entries is filled in populateFromCentralDirectory and
+ // never modified
+ Entry ze = (Entry) zipArchiveEntry;
+ OffsetEntry offsetEntry = ze.getOffsetEntry();
+ long offset = offsetEntry.headerOffset;
+ archive.seek(offset + LFH_OFFSET_FOR_FILENAME_LENGTH);
+ archive.readFully(SHORT_BUF);
+ int fileNameLen = ZipShort.getValue(SHORT_BUF);
+ archive.readFully(SHORT_BUF);
+ int extraFieldLen = ZipShort.getValue(SHORT_BUF);
+ int lenToSkip = fileNameLen;
+ while (lenToSkip > 0) {
+ int skipped = archive.skipBytes(lenToSkip);
+ if (skipped <= 0) {
+ throw new IOException("failed to skip file name in"
+ + " local file header");
+ }
+ lenToSkip -= skipped;
+ }
+ byte[] localExtraData = new byte[extraFieldLen];
+ archive.readFully(localExtraData);
+ ze.setExtra(localExtraData);
+ offsetEntry.dataOffset = offset + LFH_OFFSET_FOR_FILENAME_LENGTH
+ + SHORT + SHORT + fileNameLen + extraFieldLen;
+
+ if (entriesWithoutUTF8Flag.containsKey(ze)) {
+ NameAndComment nc = entriesWithoutUTF8Flag.get(ze);
+ ZipUtil.setNameAndCommentFromExtraFields(ze, nc.name,
+ nc.comment);
+ }
+
+ String name = ze.getName();
+ LinkedList<ZipArchiveEntry> entriesOfThatName = nameMap.get(name);
+ if (entriesOfThatName == null) {
+ entriesOfThatName = new LinkedList<ZipArchiveEntry>();
+ nameMap.put(name, entriesOfThatName);
+ }
+ entriesOfThatName.addLast(ze);
+ }
+ }
+
+ /**
+ * Checks whether the archive starts with a LFH. If it doesn't,
+ * it may be an empty archive.
+ */
+ private boolean startsWithLocalFileHeader() throws IOException {
+ archive.seek(0);
+ archive.readFully(WORD_BUF);
+ return Arrays.equals(WORD_BUF, ZipArchiveOutputStream.LFH_SIG);
+ }
+
+ /**
+ * InputStream that delegates requests to the underlying
+ * RandomAccessFile, making sure that only bytes from a certain
+ * range can be read.
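+ *
+ * Reads synchronize on the shared RandomAccessFile, so streams for
+ * several entries can be read concurrently without corrupting the
+ * file position.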
+ */
+ private class BoundedInputStream extends InputStream {
+ private long remaining;
+ private long loc;
+ private boolean addDummyByte = false;
+
+ BoundedInputStream(long start, long remaining) {
+ this.remaining = remaining;
+ loc = start;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (remaining-- <= 0) {
+ if (addDummyByte) {
+ addDummyByte = false;
+ return 0;
+ }
+ return -1;
+ }
+ synchronized (archive) {
+ archive.seek(loc++);
+ return archive.read();
+ }
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (remaining <= 0) {
+ if (addDummyByte) {
+ addDummyByte = false;
+ b[off] = 0;
+ return 1;
+ }
+ return -1;
+ }
+
+ if (len <= 0) {
+ return 0;
+ }
+
+ if (len > remaining) {
+ len = (int) remaining;
+ }
+ int ret = -1;
+ synchronized (archive) {
+ archive.seek(loc);
+ ret = archive.read(b, off, len);
+ }
+ if (ret > 0) {
+ loc += ret;
+ remaining -= ret;
+ }
+ return ret;
+ }
+
+ /**
+ * Inflater needs an extra dummy byte for nowrap - see
+ * Inflater's javadocs.
+ */
+ void addDummy() {
+ addDummyByte = true;
+ }
+ }
+
+ private static final class NameAndComment {
+ private final byte[] name;
+ private final byte[] comment;
+ private NameAndComment(byte[] name, byte[] comment) {
+ this.name = name;
+ this.comment = comment;
+ }
+ }
+
+ /**
+ * Compares two ZipArchiveEntries based on their offset within the archive.
+ *
+ * Won't return any meaningful results if one of the entries
+ * isn't part of the archive at all.
+ *
+ * @since 1.1
+ */
+ private final Comparator<ZipArchiveEntry> OFFSET_COMPARATOR =
+ new Comparator<ZipArchiveEntry>() {
+ public int compare(ZipArchiveEntry e1, ZipArchiveEntry e2) {
+ if (e1 == e2) {
+ return 0;
+ }
+
+ Entry ent1 = e1 instanceof Entry ? (Entry) e1 : null;
+ Entry ent2 = e2 instanceof Entry ? (Entry) e2 : null;
+ if (ent1 == null) {
+ return 1;
+ }
+ if (ent2 == null) {
+ return -1;
+ }
+ long val = (ent1.getOffsetEntry().headerOffset
+ - ent2.getOffsetEntry().headerOffset);
+ return val == 0 ? 0 : val < 0 ? -1 : +1;
+ }
+ };
+
+ /**
+ * Extends ZipArchiveEntry to store the offset within the archive.
+ */
+ private static class Entry extends ZipArchiveEntry {
+
+ private final OffsetEntry offsetEntry;
+
+ Entry(OffsetEntry offset) {
+ this.offsetEntry = offset;
+ }
+
+ OffsetEntry getOffsetEntry() {
+ return offsetEntry;
+ }
+
+ @Override
+ public int hashCode() {
+ return 3 * super.hashCode()
+ + (int) (offsetEntry.headerOffset % Integer.MAX_VALUE);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (super.equals(other)) {
+ // super.equals would return false if other were not an Entry
+ Entry otherEntry = (Entry) other;
+ return offsetEntry.headerOffset
+ == otherEntry.offsetEntry.headerOffset
+ && offsetEntry.dataOffset
+ == otherEntry.offsetEntry.dataOffset;
+ }
+ return false;
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipLong.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipLong.java
new file mode 100644
index 000000000..c3815441a
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipLong.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.Serializable;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.BYTE_MASK;
+import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
+
+/**
+ * Utility class that represents a four byte integer with conversion
+ * rules for the little endian byte order of ZIP files.
+ * @Immutable
+ */
+public final class ZipLong implements Cloneable, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ //private static final int BYTE_BIT_SIZE = 8;
+
+ private static final int BYTE_1 = 1;
+ private static final int BYTE_1_MASK = 0xFF00;
+ private static final int BYTE_1_SHIFT = 8;
+
+ private static final int BYTE_2 = 2;
+ private static final int BYTE_2_MASK = 0xFF0000;
+ private static final int BYTE_2_SHIFT = 16;
+
+ private static final int BYTE_3 = 3;
+ private static final long BYTE_3_MASK = 0xFF000000L;
+ private static final int BYTE_3_SHIFT = 24;
+
+ private final long value;
+
+ /** Central File Header Signature */
+ public static final ZipLong CFH_SIG = new ZipLong(0X02014B50L);
+
+ /** Local File Header Signature */
+ public static final ZipLong LFH_SIG = new ZipLong(0X04034B50L);
+
+ /**
+ * Data Descriptor signature.
+ *
+ * Actually, PKWARE uses this as marker for split/spanned
+ * archives and other archivers have started to use it as Data
+ * Descriptor signature (as well).
+ * @since 1.1
+ */
+ public static final ZipLong DD_SIG = new ZipLong(0X08074B50L);
+
+ /**
+ * Value stored in size and similar fields if ZIP64 extensions are
+ * used.
+ * @since 1.3
+ */
+ static final ZipLong ZIP64_MAGIC = new ZipLong(ZipConstants.ZIP64_MAGIC);
+
+ /**
+ * Marks ZIP archives that were supposed to be split or spanned
+ * but only needed a single segment in the end (so are actually
+ * neither split nor spanned).
+ *
+ * This is the "PK00" prefix found in some archives.
+ * @since 1.5
+ */
+ public static final ZipLong SINGLE_SEGMENT_SPLIT_MARKER =
+ new ZipLong(0X30304B50L);
+
+ /**
+ * Archive extra data record signature.
+ * @since 1.5
+ */
+ public static final ZipLong AED_SIG = new ZipLong(0X08064B50L);
+
+ /**
+ * Create instance from a number.
+ * @param value the long to store as a ZipLong
+ */
+ public ZipLong(long value) {
+ this.value = value;
+ }
+
+ /**
+ * Create instance from bytes.
+ * @param bytes the bytes to store as a ZipLong
+ */
+ public ZipLong (byte[] bytes) {
+ this(bytes, 0);
+ }
+
+ /**
+ * Create instance from the four bytes starting at offset.
+ * @param bytes the bytes to store as a ZipLong
+ * @param offset the offset to start
+ */
+ public ZipLong (byte[] bytes, int offset) {
+ value = ZipLong.getValue(bytes, offset);
+ }
+
+ /**
+ * Get value as four bytes in little endian byte order.
+ * @return value as four bytes in little endian order
+ */
+ public byte[] getBytes() {
+ return ZipLong.getBytes(value);
+ }
+
+ /**
+ * Get value as Java long.
+ * @return value as a long
+ */
+ public long getValue() {
+ return value;
+ }
+
+ /**
+ * Get value as four bytes in little endian byte order.
+ * @param value the value to convert
+ * @return value as four bytes in little endian byte order
+ */
+ public static byte[] getBytes(long value) {
+ byte[] result = new byte[WORD];
+ result[0] = (byte) ((value & BYTE_MASK));
+ result[BYTE_1] = (byte) ((value & BYTE_1_MASK) >> BYTE_1_SHIFT);
+ result[BYTE_2] = (byte) ((value & BYTE_2_MASK) >> BYTE_2_SHIFT);
+ result[BYTE_3] = (byte) ((value & BYTE_3_MASK) >> BYTE_3_SHIFT);
+ return result;
+ }
+
+ /**
+ * Helper method to get the value as a Java long from four bytes starting at given array offset
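+ * For example, the bytes {0x50, 0x4B, 0x01, 0x02} decode to
+ * 0x02014B50, the central file header signature CFH_SIG.
+ *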
+ * @param bytes the array of bytes
+ * @param offset the offset to start
+ * @return the corresponding Java long value
+ */
+ public static long getValue(byte[] bytes, int offset) {
+ long value = (bytes[offset + BYTE_3] << BYTE_3_SHIFT) & BYTE_3_MASK;
+ value += (bytes[offset + BYTE_2] << BYTE_2_SHIFT) & BYTE_2_MASK;
+ value += (bytes[offset + BYTE_1] << BYTE_1_SHIFT) & BYTE_1_MASK;
+ value += (bytes[offset] & BYTE_MASK);
+ return value;
+ }
+
+ /**
+ * Helper method to get the value as a Java long from a four-byte array
+ * @param bytes the array of bytes
+ * @return the corresponding Java long value
+ */
+ public static long getValue(byte[] bytes) {
+ return getValue(bytes, 0);
+ }
+
+ /**
+ * Override to make two instances with same value equal.
+ * @param o an object to compare
+ * @return true if the objects are equal
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || !(o instanceof ZipLong)) {
+ return false;
+ }
+ return value == ((ZipLong) o).getValue();
+ }
+
+ /**
+ * Override to make two instances with same value equal.
+ * @return the value stored in the ZipLong
+ */
+ @Override
+ public int hashCode() {
+ return (int) value;
+ }
+
+ @Override
+ public Object clone() {
+ try {
+ return super.clone();
+ } catch (CloneNotSupportedException cnfe) {
+ // impossible
+ throw new RuntimeException(cnfe);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "ZipLong value: " + value;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipMethod.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipMethod.java
new file mode 100644
index 000000000..4dafafb31
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipMethod.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.zip.ZipEntry;
+
+/**
+ * List of known compression methods.
+ *
+ * Many of these methods are currently not supported by Commons Compress.
+ *
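+ * A lookup sketch: ZipMethod.getMethodByCode(8) returns DEFLATED,
+ * while a code with no mapping returns null.
+ *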
+ * @since 1.5
+ */
+public enum ZipMethod {
+
+ /**
+ * Compression method 0 for uncompressed entries.
+ *
+ * @see ZipEntry#STORED
+ */
+ STORED(ZipEntry.STORED),
+
+ /**
+ * UnShrinking: dynamic Lempel-Ziv-Welch algorithm.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ UNSHRINKING(1),
+
+ /**
+ * Reduced with compression factor 1.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ EXPANDING_LEVEL_1(2),
+
+ /**
+ * Reduced with compression factor 2.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ EXPANDING_LEVEL_2(3),
+
+ /**
+ * Reduced with compression factor 3.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ EXPANDING_LEVEL_3(4),
+
+ /**
+ * Reduced with compression factor 4.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ EXPANDING_LEVEL_4(5),
+
+ /**
+ * Imploding.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ IMPLODING(6),
+
+ /**
+ * Tokenization.
+ *
+ * @see "Explanation of fields: compression method: (2 bytes)"
+ */
+ TOKENIZATION(7),
+
+ /**
+ * Compression method 8 for compressed (deflated) entries.
+ *
+ * @see ZipEntry#DEFLATED
+ */
+ DEFLATED(ZipEntry.DEFLATED),
+
+ /**
+ * Compression Method 9 for enhanced deflate.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ ENHANCED_DEFLATED(9),
+
+ /**
+ * PKWARE Data Compression Library Imploding.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ PKWARE_IMPLODING(10),
+
+ /**
+ * Compression Method 12 for bzip2.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ BZIP2(12),
+
+ /**
+ * Compression Method 14 for LZMA.
+ *
+ * @see http://www.7-zip.org/sdk.html
+ * @see http://www.winzip.com/wz54.htm
+ */
+ LZMA(14),
+
+
+ /**
+ * Compression Method 96 for Jpeg compression.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ JPEG(96),
+
+ /**
+ * Compression Method 97 for WavPack.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ WAVPACK(97),
+
+ /**
+ * Compression Method 98 for PPMd.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ PPMD(98),
+
+
+ /**
+ * Compression Method 99 for AES encryption.
+ *
+ * @see http://www.winzip.com/wz54.htm
+ */
+ AES_ENCRYPTED(99),
+
+ /**
+ * Unknown compression method.
+ */
+ UNKNOWN(-1);
+
+ private final int code;
+
+ private static final Map<Integer, ZipMethod> codeToEnum;
+
+ static {
+ Map<Integer, ZipMethod> cte = new HashMap<Integer, ZipMethod>();
+ for (ZipMethod method : values()) {
+ cte.put(Integer.valueOf(method.getCode()), method);
+ }
+ codeToEnum = Collections.unmodifiableMap(cte);
+ }
+
+ /**
+ * private constructor for enum style class.
+ */
+ ZipMethod(int code) {
+ this.code = code;
+ }
+
+ /**
+ * the code of the compression method.
+ *
+ * @see ZipArchiveEntry#getMethod()
+ *
+ * @return an integer code for the method
+ */
+ public int getCode() {
+ return code;
+ }
+
+
+ /**
+ * returns the {@link ZipMethod} for the given code or null if the
+ * method is not known.
+ */
+ public static ZipMethod getMethodByCode(int code) {
+ return codeToEnum.get(Integer.valueOf(code));
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipShort.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipShort.java
new file mode 100644
index 000000000..b74db86ab
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipShort.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.Serializable;
+
+import static org.apache.commons.compress.archivers.zip.ZipConstants.BYTE_MASK;
+
+/**
+ * Utility class that represents a two byte integer with conversion
+ * rules for the little endian byte order of ZIP files.
+ * @Immutable
+ */
+public final class ZipShort implements Cloneable, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private static final int BYTE_1_MASK = 0xFF00;
+ private static final int BYTE_1_SHIFT = 8;
+
+ private final int value;
+
+ /**
+ * Create instance from a number.
+ * @param value the int to store as a ZipShort
+ */
+ public ZipShort (int value) {
+ this.value = value;
+ }
+
+ /**
+ * Create instance from bytes.
+ * @param bytes the bytes to store as a ZipShort
+ */
+ public ZipShort (byte[] bytes) {
+ this(bytes, 0);
+ }
+
+ /**
+ * Create instance from the two bytes starting at offset.
+ * @param bytes the bytes to store as a ZipShort
+ * @param offset the offset to start
+ */
+ public ZipShort (byte[] bytes, int offset) {
+ value = ZipShort.getValue(bytes, offset);
+ }
+
+ /**
+ * Get value as two bytes in little endian byte order.
+ * @return the value as a two byte array in little endian byte order
+ */
+ public byte[] getBytes() {
+ byte[] result = new byte[2];
+ result[0] = (byte) (value & BYTE_MASK);
+ result[1] = (byte) ((value & BYTE_1_MASK) >> BYTE_1_SHIFT);
+ return result;
+ }
+
+ /**
+ * Get value as Java int.
+ * @return value as a Java int
+ */
+ public int getValue() {
+ return value;
+ }
+
+ /**
+ * Get value as two bytes in little endian byte order.
+ * @param value the Java int to convert to bytes
+ * @return the converted int as a byte array in little endian byte order
+ */
+ public static byte[] getBytes(int value) {
+ byte[] result = new byte[2];
+ result[0] = (byte) (value & BYTE_MASK);
+ result[1] = (byte) ((value & BYTE_1_MASK) >> BYTE_1_SHIFT);
+ return result;
+ }
+
+ /**
+ * Helper method to get the value as a java int from two bytes starting at given array offset
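+ * For example, the bytes {0x50, 0x4B} decode to 0x4B50.
+ *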
+ * @param bytes the array of bytes
+ * @param offset the offset to start
+ * @return the corresponding java int value
+ */
+ public static int getValue(byte[] bytes, int offset) {
+ int value = (bytes[offset + 1] << BYTE_1_SHIFT) & BYTE_1_MASK;
+ value += (bytes[offset] & BYTE_MASK);
+ return value;
+ }
+
+ /**
+ * Helper method to get the value as a java int from a two-byte array
+ * @param bytes the array of bytes
+ * @return the corresponding java int value
+ */
+ public static int getValue(byte[] bytes) {
+ return getValue(bytes, 0);
+ }
+
+ /**
+ * Override to make two instances with same value equal.
+ * @param o an object to compare
+ * @return true if the objects are equal
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || !(o instanceof ZipShort)) {
+ return false;
+ }
+ return value == ((ZipShort) o).getValue();
+ }
+
+ /**
+ * Override to make two instances with same value equal.
+ * @return the value stored in the ZipShort
+ */
+ @Override
+ public int hashCode() {
+ return value;
+ }
+
+ @Override
+ public Object clone() {
+ try {
+ return super.clone();
+ } catch (CloneNotSupportedException cnfe) {
+ // impossible
+ throw new RuntimeException(cnfe);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "ZipShort value: " + value;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipUtil.java b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipUtil.java
new file mode 100644
index 000000000..a138f6e40
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/ZipUtil.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.archivers.zip;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.zip.CRC32;
+import java.util.zip.ZipEntry;
+
+/**
+ * Utility class for handling DOS and Java time conversions.
+ * @Immutable
+ */
+public abstract class ZipUtil {
+ /**
+ * Smallest date/time ZIP can handle.
+ */
+ private static final byte[] DOS_TIME_MIN = ZipLong.getBytes(0x00002100L);
+
+ /**
+ * Convert a Date object to a DOS date/time field.
+ * @param time the Date to convert
+ * @return the date as a ZipLong
+ */
+ public static ZipLong toDosTime(Date time) {
+ return new ZipLong(toDosTime(time.getTime()));
+ }
+
+ /**
+ * Convert a Date object to a DOS date/time field.
+ *
+ * Stolen from InfoZip's fileio.c
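+ *
+ * As an illustration, 2001-01-01 00:00:00 packs to
+ * ((2001 - 1980) << 25) | (1 << 21) | (1 << 16) = 0x2A210000.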
+ * @param t number of milliseconds since the epoch
+ * @return the date as a byte array
+ */
+ public static byte[] toDosTime(long t) {
+ Calendar c = Calendar.getInstance();
+ c.setTimeInMillis(t);
+
+ int year = c.get(Calendar.YEAR);
+ if (year < 1980) {
+ return copy(DOS_TIME_MIN); // stop callers from changing the array
+ }
+ int month = c.get(Calendar.MONTH) + 1;
+ long value = ((year - 1980) << 25)
+ | (month << 21)
+ | (c.get(Calendar.DAY_OF_MONTH) << 16)
+ | (c.get(Calendar.HOUR_OF_DAY) << 11)
+ | (c.get(Calendar.MINUTE) << 5)
+ | (c.get(Calendar.SECOND) >> 1);
+ return ZipLong.getBytes(value);
+ }
+
+ /**
+ * Assumes a negative integer really is a positive integer that
+ * has wrapped around and re-creates the original value.
+ *
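+ * For example, adjustToLong(-1) yields 4294967295L (0xFFFFFFFFL).
+ *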
+ * @param i the value to treat as unsigned int.
+ * @return the unsigned int as a long.
+ */
+ public static long adjustToLong(int i) {
+ if (i < 0) {
+ return 2 * ((long) Integer.MAX_VALUE) + 2 + i;
+ } else {
+ return i;
+ }
+ }
+
+ /**
+ * Reverses a byte[] array. Reverses in-place (thus provided array is
+ * mutated), but also returns same for convenience.
+ *
+ * @param array to reverse (mutated in-place, but also returned for
+ * convenience).
+ *
+ * @return the reversed array (mutated in-place, but also returned for
+ * convenience).
+ * @since 1.5
+ */
+ public static byte[] reverse(final byte[] array) {
+ final int z = array.length - 1; // position of last element
+ for (int i = 0; i < array.length / 2; i++) {
+ byte x = array[i];
+ array[i] = array[z - i];
+ array[z - i] = x;
+ }
+ return array;
+ }
+
+ /**
+ * Converts a BigInteger into a long, and blows up
+ * (NumberFormatException) if the BigInteger is too big.
+ *
+ * @param big BigInteger to convert.
+ * @return long representation of the BigInteger.
+ */
+ static long bigToLong(BigInteger big) {
+ if (big.bitLength() <= 63) { // bitLength() doesn't count the sign bit.
+ return big.longValue();
+ } else {
+ throw new NumberFormatException("The BigInteger cannot fit inside a 64 bit java long: [" + big + "]");
+ }
+ }
+
+ /**
+ *
+ * Converts a long into a BigInteger. Negative numbers between -1 and
+ * -2^31 are treated as unsigned 32 bit (e.g., positive) integers.
+ * Negative numbers below -2^31 cause an IllegalArgumentException
+ * to be thrown.
+ *
+ *
+ * @param l long to convert to BigInteger.
+ * @return BigInteger representation of the provided long.
+ */
+ static BigInteger longToBig(long l) {
+ if (l < Integer.MIN_VALUE) {
+ throw new IllegalArgumentException("Negative longs < -2^31 not permitted: [" + l + "]");
+ } else if (l < 0 && l >= Integer.MIN_VALUE) {
+ // If someone passes in a -2, they probably mean 4294967294
+ // (For example, Unix UID/GID's are 32 bit unsigned.)
+ l = ZipUtil.adjustToLong((int) l);
+ }
+ return BigInteger.valueOf(l);
+ }
+
+ /**
+ * Converts a signed byte into an unsigned integer representation
+ * (e.g., -1 becomes 255).
+ *
+ * @param b byte to convert to int
+ * @return int representation of the provided byte
+ * @since 1.5
+ */
+ public static int signedByteToUnsignedInt(byte b) {
+ if (b >= 0) {
+ return b;
+ } else {
+ return 256 + b;
+ }
+ }
+
+ /**
+ * Converts an unsigned integer to a signed byte (e.g., 255 becomes -1).
+ *
+ * @param i integer to convert to byte
+ * @return byte representation of the provided int
+ * @throws IllegalArgumentException if the provided integer is not inside the range [0,255].
+ * @since 1.5
+ */
+ public static byte unsignedIntToSignedByte(int i) {
+ if (i > 255 || i < 0) {
+ throw new IllegalArgumentException("Can only convert non-negative integers between [0,255] to byte: [" + i + "]");
+ }
+ if (i < 128) {
+ return (byte) i;
+ } else {
+ return (byte) (i - 256);
+ }
+ }
+
+ /**
+ * Convert a DOS date/time field to a Date object.
+ *
+ * @param zipDosTime contains the stored DOS time.
+ * @return a Date instance corresponding to the given time.
+ */
+ public static Date fromDosTime(ZipLong zipDosTime) {
+ long dosTime = zipDosTime.getValue();
+ return new Date(dosToJavaTime(dosTime));
+ }
+
+ /**
+ * Converts DOS time to Java time (number of milliseconds since
+ * epoch).
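+ *
+ * The field layout mirrors toDosTime: year since 1980 (7 bits),
+ * month (4), day (5), hour (5), minute (6) and seconds/2 (5 bits).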
+ */
+ public static long dosToJavaTime(long dosTime) {
+ Calendar cal = Calendar.getInstance();
+ // CheckStyle:MagicNumberCheck OFF - no point
+ cal.set(Calendar.YEAR, (int) ((dosTime >> 25) & 0x7f) + 1980);
+ cal.set(Calendar.MONTH, (int) ((dosTime >> 21) & 0x0f) - 1);
+ cal.set(Calendar.DATE, (int) (dosTime >> 16) & 0x1f);
+ cal.set(Calendar.HOUR_OF_DAY, (int) (dosTime >> 11) & 0x1f);
+ cal.set(Calendar.MINUTE, (int) (dosTime >> 5) & 0x3f);
+ cal.set(Calendar.SECOND, (int) (dosTime << 1) & 0x3e);
+ cal.set(Calendar.MILLISECOND, 0);
+ // CheckStyle:MagicNumberCheck ON
+ return cal.getTime().getTime();
+ }
+
+ /**
+ * If the entry has Unicode*ExtraFields and the CRCs of the
+ * names/comments match those of the extra fields, transfer the
+ * known Unicode values from the extra field.
+ */
+ static void setNameAndCommentFromExtraFields(ZipArchiveEntry ze,
+ byte[] originalNameBytes,
+ byte[] commentBytes) {
+ UnicodePathExtraField name = (UnicodePathExtraField)
+ ze.getExtraField(UnicodePathExtraField.UPATH_ID);
+ String originalName = ze.getName();
+ String newName = getUnicodeStringIfOriginalMatches(name,
+ originalNameBytes);
+ if (newName != null && !originalName.equals(newName)) {
+ ze.setName(newName);
+ }
+
+ if (commentBytes != null && commentBytes.length > 0) {
+ UnicodeCommentExtraField cmt = (UnicodeCommentExtraField)
+ ze.getExtraField(UnicodeCommentExtraField.UCOM_ID);
+ String newComment =
+ getUnicodeStringIfOriginalMatches(cmt, commentBytes);
+ if (newComment != null) {
+ ze.setComment(newComment);
+ }
+ }
+ }
+
+ /**
+ * If the stored CRC matches the one of the given name, return the
+ * Unicode name of the given field.
+ *
+ * If the field is null or the CRCs don't match, return null
+ * instead.
+ */
+ private static
+ String getUnicodeStringIfOriginalMatches(AbstractUnicodeExtraField f,
+ byte[] orig) {
+ if (f != null) {
+ CRC32 crc32 = new CRC32();
+ crc32.update(orig);
+ long origCRC32 = crc32.getValue();
+
+ if (origCRC32 == f.getNameCRC32()) {
+ try {
+ return ZipEncodingHelper
+ .UTF8_ZIP_ENCODING.decode(f.getUnicodeName());
+ } catch (IOException ex) {
+ // UTF-8 unsupported? Should be impossible: the
+ // Unicode*ExtraField must contain some bad bytes
+
+ // TODO log this anywhere?
+ return null;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Create a copy of the given array - or return null if the
+ * argument is null.
+ */
+ static byte[] copy(byte[] from) {
+ if (from != null) {
+ byte[] to = new byte[from.length];
+ System.arraycopy(from, 0, to, 0, to.length);
+ return to;
+ }
+ return null;
+ }
+
+ /**
+ * Whether this library is able to read or write the given entry.
+ */
+ static boolean canHandleEntryData(ZipArchiveEntry entry) {
+ return supportsEncryptionOf(entry) && supportsMethodOf(entry);
+ }
+
+ /**
+ * Whether this library supports the encryption used by the given
+ * entry.
+ *
+ * @return true if the entry isn't encrypted at all
+ */
+ private static boolean supportsEncryptionOf(ZipArchiveEntry entry) {
+ return !entry.getGeneralPurposeBit().usesEncryption();
+ }
+
+ /**
+ * Whether this library supports the compression method used by
+ * the given entry.
+ *
+ * @return true if the compression method is STORED or DEFLATED
+ */
+ private static boolean supportsMethodOf(ZipArchiveEntry entry) {
+ return entry.getMethod() == ZipEntry.STORED
+ || entry.getMethod() == ZipMethod.UNSHRINKING.getCode()
+ || entry.getMethod() == ZipMethod.IMPLODING.getCode()
+ || entry.getMethod() == ZipEntry.DEFLATED;
+ }
+
+ /**
+ * Checks whether the entry requires features not (yet) supported
+ * by the library and throws an exception if it does.
+ */
+ static void checkRequestedFeatures(ZipArchiveEntry ze)
+ throws UnsupportedZipFeatureException {
+ if (!supportsEncryptionOf(ze)) {
+ throw
+ new UnsupportedZipFeatureException(UnsupportedZipFeatureException
+ .Feature.ENCRYPTION, ze);
+ }
+ if (!supportsMethodOf(ze)) {
+ ZipMethod m = ZipMethod.getMethodByCode(ze.getMethod());
+ if (m == null) {
+ throw
+ new UnsupportedZipFeatureException(UnsupportedZipFeatureException
+ .Feature.METHOD, ze);
+ } else {
+ throw new UnsupportedZipFeatureException(m, ze);
+ }
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/package.html
new file mode 100644
index 000000000..521687be6
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/archivers/zip/package.html
@@ -0,0 +1,24 @@
+<html>
+<body>
+
+ Provides stream classes for reading and writing archives using
+ the ZIP format.
+
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/changes/Change.java b/Tools/Cache Editor/src/org/apache/commons/compress/changes/Change.java
new file mode 100644
index 000000000..c1b0a4c50
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/changes/Change.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.changes;
+
+import java.io.InputStream;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+/**
+ * Change holds meta information about a change.
+ *
+ * @Immutable
+ */
+class Change {
+ private final String targetFile; // entry name to delete
+ private final ArchiveEntry entry; // new entry to add
+ private final InputStream input; // source for new entry
+ private final boolean replaceMode; // whether this change should replace existing entries
+
+ // Type of change
+ private final int type;
+ // Possible type values
+ static final int TYPE_DELETE = 1;
+ static final int TYPE_ADD = 2;
+ static final int TYPE_MOVE = 3; // NOT USED
+ static final int TYPE_DELETE_DIR = 4;
+
+ /**
+ * Constructor. Takes the filename of the file to be deleted
+ * from the stream as argument.
+ * @param pFilename the filename of the file to delete
+ * @param type the type of change, TYPE_DELETE or TYPE_DELETE_DIR
+ */
+ Change(final String pFilename, int type) {
+ if(pFilename == null) {
+ throw new NullPointerException();
+ }
+ this.targetFile = pFilename;
+ this.type = type;
+ this.input = null;
+ this.entry = null;
+ this.replaceMode = true;
+ }
+
+ /**
+ * Construct a change which adds an entry.
+ *
+ * @param pEntry the entry details
+ * @param pInput the InputStream for the entry data
+ * @param replace whether an existing entry of the same name should be replaced
+ */
+ Change(final ArchiveEntry pEntry, final InputStream pInput, boolean replace) {
+ if(pEntry == null || pInput == null) {
+ throw new NullPointerException();
+ }
+ this.entry = pEntry;
+ this.input = pInput;
+ type = TYPE_ADD;
+ targetFile = null;
+ this.replaceMode = replace;
+ }
+
+ ArchiveEntry getEntry() {
+ return entry;
+ }
+
+ InputStream getInput() {
+ return input;
+ }
+
+ String targetFile() {
+ return targetFile;
+ }
+
+ int type() {
+ return type;
+ }
+
+ boolean isReplaceMode() {
+ return replaceMode;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSet.java b/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSet.java
new file mode 100644
index 000000000..31155f627
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSet.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.changes;
+
+import java.io.InputStream;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+/**
+ * ChangeSet collects and performs changes to an archive.
+ * Putting delete changes in this ChangeSet from multiple threads can
+ * cause conflicts.
+ *
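+ * A minimal usage sketch (entry, input, archiveIn and archiveOut are
+ * hypothetical placeholders, not part of this API):
+ * <pre>
+ * ChangeSet changeSet = new ChangeSet();
+ * changeSet.delete("obsolete.txt");
+ * changeSet.add(entry, input); // an ArchiveEntry plus its data stream
+ * ChangeSetResults results =
+ *     new ChangeSetPerformer(changeSet).perform(archiveIn, archiveOut);
+ * </pre>
+ *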
+ * @NotThreadSafe
+ */
+public final class ChangeSet {
+
+ private final Set<Change> changes = new LinkedHashSet<Change>();
+
+ /**
+ * Deletes the file with the filename from the archive.
+ *
+ * @param filename
+ * the filename of the file to delete
+ */
+ public void delete(final String filename) {
+ addDeletion(new Change(filename, Change.TYPE_DELETE));
+ }
+
+ /**
+ * Deletes the directory tree from the archive.
+ *
+ * @param dirName
+ * the name of the directory tree to delete
+ */
+ public void deleteDir(final String dirName) {
+ addDeletion(new Change(dirName, Change.TYPE_DELETE_DIR));
+ }
+
+ /**
+ * Adds a new archive entry to the archive.
+ *
+ * @param pEntry
+ * the entry to add
+ * @param pInput
+ * the datastream to add
+ */
+ public void add(final ArchiveEntry pEntry, final InputStream pInput) {
+ this.add(pEntry, pInput, true);
+ }
+
+ /**
+ * Adds a new archive entry to the archive.
+ * If replace is set to true, this change will replace all other additions
+ * done in this ChangeSet and all existing entries in the original stream.
+ *
+ * @param pEntry
+ * the entry to add
+ * @param pInput
+ * the datastream to add
+ * @param replace
+ * indicates that this change should replace existing entries
+ */
+ public void add(final ArchiveEntry pEntry, final InputStream pInput, final boolean replace) {
+ addAddition(new Change(pEntry, pInput, replace));
+ }
+
+ /**
+ * Adds an addition change.
+ *
+ * @param pChange
+ * the change which should result in an addition
+ */
+ private void addAddition(Change pChange) {
+ if (Change.TYPE_ADD != pChange.type() ||
+ pChange.getInput() == null) {
+ return;
+ }
+
+ if (!changes.isEmpty()) {
+ for (Iterator<Change> it = changes.iterator(); it.hasNext();) {
+ Change change = it.next();
+ if (change.type() == Change.TYPE_ADD
+ && change.getEntry() != null) {
+ ArchiveEntry entry = change.getEntry();
+
+ if(entry.equals(pChange.getEntry())) {
+ if(pChange.isReplaceMode()) {
+ it.remove();
+ changes.add(pChange);
+ return;
+ } else {
+ // do not add this change
+ return;
+ }
+ }
+ }
+ }
+ }
+ changes.add(pChange);
+ }
+
+ /**
+ * Adds a delete change.
+ *
+ * @param pChange
+ * the change which should result in a deletion
+ */
+ private void addDeletion(Change pChange) {
+ if ((Change.TYPE_DELETE != pChange.type() &&
+ Change.TYPE_DELETE_DIR != pChange.type()) ||
+ pChange.targetFile() == null) {
+ return;
+ }
+ String source = pChange.targetFile();
+
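+ // A deletion supersedes any pending addition it covers: an exact
+ // name match for TYPE_DELETE, or anything below the directory for
+ // TYPE_DELETE_DIR.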
+ if (!changes.isEmpty()) {
+ for (Iterator<Change> it = changes.iterator(); it.hasNext();) {
+ Change change = it.next();
+ if (change.type() == Change.TYPE_ADD
+ && change.getEntry() != null) {
+ String target = change.getEntry().getName();
+
+ if (Change.TYPE_DELETE == pChange.type() && source.equals(target)) {
+ it.remove();
+ } else if (Change.TYPE_DELETE_DIR == pChange.type() &&
+ target.matches(source + "/.*")) {
+ it.remove();
+ }
+ }
+ }
+ }
+ changes.add(pChange);
+ }
+
+ /**
+ * Returns the set of changes as a copy. Changes on the returned set
+ * are not reflected on this ChangeSet and vice versa.
+ * @return the changes as a copy
+ */
+ Set<Change> getChanges() {
+ return new LinkedHashSet<Change>(changes);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSetPerformer.java b/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSetPerformer.java
new file mode 100644
index 000000000..8ed861ce8
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSetPerformer.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.changes;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipFile;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * Performs ChangeSet operations on a stream.
+ * This class is thread safe and can be used multiple times.
+ * It operates on a copy of the ChangeSet. If the ChangeSet changes,
+ * a new Performer must be created.
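+ *
+ * A short sketch of the intended call pattern (changeSet and the
+ * archive stream variables are hypothetical placeholders):
+ * <pre>
+ * ChangeSetPerformer performer = new ChangeSetPerformer(changeSet);
+ * ChangeSetResults results = performer.perform(archiveIn, archiveOut);
+ * </pre>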
+ *
+ * @ThreadSafe
+ * @Immutable
+ */
+public class ChangeSetPerformer {
+ private final Set<Change> changes;
+
+ /**
+ * Constructs a ChangeSetPerformer with the changes from this ChangeSet
+ * @param changeSet the ChangeSet which operations are used for performing
+ */
+ public ChangeSetPerformer(final ChangeSet changeSet) {
+ changes = changeSet.getChanges();
+ }
+
+ /**
+ * Performs all changes collected in this ChangeSet on the input stream and
+ * streams the result to the output stream. Perform may be called more than once.
+ *
+ * This method finishes the stream, no other entries should be added
+ * after that.
+ *
+ * @param in
+ * the InputStream to perform the changes on
+ * @param out
+ * the resulting OutputStream with all modifications
+ * @throws IOException
+ * if a read/write error occurs
+ * @return the results of this operation
+ */
+ public ChangeSetResults perform(ArchiveInputStream in, ArchiveOutputStream out)
+ throws IOException {
+ return perform(new ArchiveInputStreamIterator(in), out);
+ }
+
+ /**
+ * Performs all changes collected in this ChangeSet on the ZipFile and
+ * streams the result to the output stream. Perform may be called more than once.
+ *
+ * This method finishes the stream, no other entries should be added
+ * after that.
+ *
+ * @param in
+ * the ZipFile to perform the changes on
+ * @param out
+ * the resulting OutputStream with all modifications
+ * @throws IOException
+ * if a read/write error occurs
+ * @return the results of this operation
+ * @since 1.5
+ */
+ public ChangeSetResults perform(ZipFile in, ArchiveOutputStream out)
+ throws IOException {
+ return perform(new ZipFileIterator(in), out);
+ }
+
+ /**
+ * Performs all changes collected in this ChangeSet on the input entries and
+ * streams the result to the output stream.
+ *
+ * This method finishes the stream, no other entries should be added
+ * after that.
+ *
+ * @param entryIterator
+ * the entries to perform the changes on
+ * @param out
+ * the resulting OutputStream with all modifications
+ * @throws IOException
+ * if a read/write error occurs
+ * @return the results of this operation
+ */
+ private ChangeSetResults perform(ArchiveEntryIterator entryIterator,
+ ArchiveOutputStream out)
+ throws IOException {
+ ChangeSetResults results = new ChangeSetResults();
+
+ Set<Change> workingSet = new LinkedHashSet<Change>(changes);
+
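+ // Phase 1: write all replace-mode additions up front; they take
+ // precedence over entries with the same name in the original stream.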
+ for (Iterator<Change> it = workingSet.iterator(); it.hasNext();) {
+ Change change = it.next();
+
+ if (change.type() == Change.TYPE_ADD && change.isReplaceMode()) {
+ copyStream(change.getInput(), out, change.getEntry());
+ it.remove();
+ results.addedFromChangeSet(change.getEntry().getName());
+ }
+ }
+
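+ // Phase 2: copy entries from the original archive unless a change
+ // deletes them, they are deleted later in the set, or an addition
+ // with the same name has already been written.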
+ while (entryIterator.hasNext()) {
+ ArchiveEntry entry = entryIterator.next();
+ boolean copy = true;
+
+ for (Iterator<Change> it = workingSet.iterator(); it.hasNext();) {
+ Change change = it.next();
+
+ final int type = change.type();
+ final String name = entry.getName();
+ if (type == Change.TYPE_DELETE && name != null) {
+ if (name.equals(change.targetFile())) {
+ copy = false;
+ it.remove();
+ results.deleted(name);
+ break;
+ }
+ } else if (type == Change.TYPE_DELETE_DIR && name != null) {
+ // don't combine ifs, to make future extensions easier
+ if (name.startsWith(change.targetFile() + "/")) { // NOPMD
+ copy = false;
+ results.deleted(name);
+ break;
+ }
+ }
+ }
+
+ if (copy
+ && !isDeletedLater(workingSet, entry)
+ && !results.hasBeenAdded(entry.getName())) {
+ copyStream(entryIterator.getInputStream(), out, entry);
+ results.addedFromStream(entry.getName());
+ }
+ }
+
+ // Phase 3: add files from the ChangeSet which haven't been written yet and do not have replace mode on
+ for (Iterator<Change> it = workingSet.iterator(); it.hasNext();) {
+ Change change = it.next();
+
+ if (change.type() == Change.TYPE_ADD &&
+ !change.isReplaceMode() &&
+ !results.hasBeenAdded(change.getEntry().getName())) {
+ copyStream(change.getInput(), out, change.getEntry());
+ it.remove();
+ results.addedFromChangeSet(change.getEntry().getName());
+ }
+ }
+ out.finish();
+ return results;
+ }
+
+ /**
+ * Checks if an ArchiveEntry is deleted later in the ChangeSet. This is
+ * necessary if a file is added with this ChangeSet, but is deleted
+ * later in the same set.
+ *
+ * @param workingSet
+ * the remaining changes to check against
+ * @param entry
+ * the entry to check
+ * @return true, if this entry has a deletion change later, false otherwise
+ */
+ private boolean isDeletedLater(Set<Change> workingSet, ArchiveEntry entry) {
+ String source = entry.getName();
+
+ if (!workingSet.isEmpty()) {
+ for (Change change : workingSet) {
+ final int type = change.type();
+ String target = change.targetFile();
+ if (type == Change.TYPE_DELETE && source.equals(target)) {
+ return true;
+ }
+
+ if (type == Change.TYPE_DELETE_DIR && source.startsWith(target + "/")){
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Copies the ArchiveEntry to the Output stream
+ *
+ * @param in
+ * the stream to read the data from
+ * @param out
+ * the stream to write the data to
+ * @param entry
+ * the entry to write
+ * @throws IOException
+ * if data cannot be read or written
+ */
+ private void copyStream(InputStream in, ArchiveOutputStream out,
+ ArchiveEntry entry) throws IOException {
+ out.putArchiveEntry(entry);
+ IOUtils.copy(in, out);
+ out.closeArchiveEntry();
+ }
+
+ /**
+ * Used in perform to abstract out getting entries and streams for
+ * those entries.
+ *
+ * Iterator#hasNext is not allowed to throw exceptions, which is
+ * why we can't use Iterator<ArchiveEntry> directly -
+ * otherwise we'd need to convert exceptions thrown in
+ * ArchiveInputStream#getNextEntry.
+ */
+ interface ArchiveEntryIterator {
+ boolean hasNext() throws IOException;
+ ArchiveEntry next();
+ InputStream getInputStream() throws IOException;
+ }
+
+ private static class ArchiveInputStreamIterator
+ implements ArchiveEntryIterator {
+ private final ArchiveInputStream in;
+ private ArchiveEntry next;
+ ArchiveInputStreamIterator(ArchiveInputStream in) {
+ this.in = in;
+ }
+ public boolean hasNext() throws IOException {
+ return (next = in.getNextEntry()) != null;
+ }
+ public ArchiveEntry next() {
+ return next;
+ }
+ public InputStream getInputStream() {
+ return in;
+ }
+ }
+
+ private static class ZipFileIterator
+ implements ArchiveEntryIterator {
+ private final ZipFile in;
+ private final Enumeration<ZipArchiveEntry> nestedEnum;
+ private ZipArchiveEntry current;
+ ZipFileIterator(ZipFile in) {
+ this.in = in;
+ nestedEnum = in.getEntriesInPhysicalOrder();
+ }
+ public boolean hasNext() {
+ return nestedEnum.hasMoreElements();
+ }
+ public ArchiveEntry next() {
+ return current = nestedEnum.nextElement();
+ }
+ public InputStream getInputStream() throws IOException {
+ return in.getInputStream(current);
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSetResults.java b/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSetResults.java
new file mode 100644
index 000000000..d4f4cc20a
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/changes/ChangeSetResults.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.changes;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Stores the results of a performed ChangeSet operation.
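+ *
+ * A short sketch of inspecting the results (variable names are
+ * hypothetical placeholders):
+ * <pre>
+ * ChangeSetResults results = new ChangeSetPerformer(changeSet).perform(in, out);
+ * for (String name : results.getDeleted()) {
+ *     System.out.println("deleted: " + name);
+ * }
+ * </pre>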
+ */
+public class ChangeSetResults {
+ private final List<String> addedFromChangeSet = new ArrayList<String>();
+ private final List<String> addedFromStream = new ArrayList<String>();
+ private final List<String> deleted = new ArrayList<String>();
+
+ /**
+ * Adds the filename of a recently deleted file to the result list.
+ * @param fileName the file which has been deleted
+ */
+ void deleted(String fileName) {
+ deleted.add(fileName);
+ }
+
+ /**
+ * Adds the name of a file to the result list which has been
+ * copied from the source stream to the target stream.
+ * @param fileName the file name which has been added from the original stream
+ */
+ void addedFromStream(String fileName) {
+ addedFromStream.add(fileName);
+ }
+
+ /**
+ * Adds the name of a file to the result list which has been
+ * copied from the changeset to the target stream
+ * @param fileName the name of the file
+ */
+ void addedFromChangeSet(String fileName) {
+ addedFromChangeSet.add(fileName);
+ }
+
+ /**
+ * Returns a list of filenames which have been added from the changeset
+ * @return the list of filenames
+ */
+ public List<String> getAddedFromChangeSet() {
+ return addedFromChangeSet;
+ }
+
+ /**
+ * Returns a list of filenames which have been added from the original stream
+ * @return the list of filenames
+ */
+ public List<String> getAddedFromStream() {
+ return addedFromStream;
+ }
+
+ /**
+ * Returns a list of filenames which have been deleted
+ * @return the list of filenames
+ */
+ public List<String> getDeleted() {
+ return deleted;
+ }
+
+ /**
+ * Checks if a filename has already been added to the result list
+ * @param filename the filename to check
+ * @return true, if this filename has already been added
+ */
+ boolean hasBeenAdded(String filename) {
+ return addedFromChangeSet.contains(filename) || addedFromStream.contains(filename);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/changes/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/changes/package.html
new file mode 100644
index 000000000..4ba3e87d0
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/changes/package.html
@@ -0,0 +1,27 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements. See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership. The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License. You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied. See the License for the
+   specific language governing permissions and limitations
+   under the License.
+-->
+<html>
+<body>
+  <p>EXPERIMENTAL support for changesets that are applied to
+  archives.</p>
+
+  <p>This API is considered unstable and may be modified or even
+  removed in future releases.</p>
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorException.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorException.java
new file mode 100644
index 000000000..aea05b32b
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorException.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors;
+
+/**
+ * Compressor related exception
+ */
+public class CompressorException extends Exception {
+
+ /** Serial */
+ private static final long serialVersionUID = -2932901310255908814L;
+
+ /**
+ * Constructs a new exception with the specified detail message. The cause
+ * is not initialized.
+ *
+ * @param message
+ * the detail message
+ */
+ public CompressorException(String message) {
+ super(message);
+ }
+
+ /**
+ * Constructs a new exception with the specified detail message and cause.
+ *
+ * @param message
+ * the detail message
+ * @param cause
+ * the cause
+ */
+ public CompressorException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorInputStream.java
new file mode 100644
index 000000000..52b161ba5
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorInputStream.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors;
+
+import java.io.InputStream;
+
+public abstract class CompressorInputStream extends InputStream {
+ private long bytesRead = 0;
+
+ /**
+ * Increments the counter of already read bytes.
+ * Doesn't increment if the EOF has been hit (read == -1)
+ *
+ * @param read the number of bytes read
+ *
+ * @since 1.1
+ */
+ protected void count(int read) {
+ count((long) read);
+ }
+
+ /**
+ * Increments the counter of already read bytes.
+ * Doesn't increment if the EOF has been hit (read == -1)
+ *
+ * @param read the number of bytes read
+ */
+ protected void count(long read) {
+ if (read != -1) {
+ bytesRead = bytesRead + read;
+ }
+ }
+
+ /**
+ * Decrements the counter of already read bytes.
+ *
+ * @param pushedBack the number of bytes pushed back.
+ * @since 1.7
+ */
+ protected void pushedBackBytes(long pushedBack) {
+ bytesRead -= pushedBack;
+ }
+
+ /**
+ * Returns the current number of bytes read from this stream.
+ * @return the number of read bytes
+ * @deprecated this method may yield wrong results for large
+ * archives, use #getBytesRead instead
+ */
+ @Deprecated
+ public int getCount() {
+ return (int) bytesRead;
+ }
+
+ /**
+ * Returns the current number of bytes read from this stream.
+ * @return the number of read bytes
+ *
+ * @since 1.1
+ */
+ public long getBytesRead() {
+ return bytesRead;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorOutputStream.java
new file mode 100644
index 000000000..51eee9cee
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorOutputStream.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors;
+
+import java.io.OutputStream;
+
+public abstract class CompressorOutputStream extends OutputStream {
+ // TODO
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorStreamFactory.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorStreamFactory.java
new file mode 100644
index 000000000..d2ae16d13
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/CompressorStreamFactory.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
+import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
+import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
+import org.apache.commons.compress.compressors.pack200.Pack200CompressorInputStream;
+import org.apache.commons.compress.compressors.pack200.Pack200CompressorOutputStream;
+import org.apache.commons.compress.compressors.snappy.FramedSnappyCompressorInputStream;
+import org.apache.commons.compress.compressors.snappy.SnappyCompressorInputStream;
+import org.apache.commons.compress.compressors.z.ZCompressorInputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * Factory to create Compressor[In|Out]putStreams from names. To add other
+ * implementations you should extend CompressorStreamFactory and override the
+ * appropriate methods (and call their implementation from super of course).
+ *
+ * Example (Compressing a file):
+ *
+ *
+ * final OutputStream out = new FileOutputStream(output);
+ * CompressorOutputStream cos =
+ * new CompressorStreamFactory().createCompressorOutputStream(CompressorStreamFactory.BZIP2, out);
+ * IOUtils.copy(new FileInputStream(input), cos);
+ * cos.close();
+ *
+ *
+ * Example (Decompressing a file):
+ *
+ * final InputStream is = new FileInputStream(input);
+ * CompressorInputStream in =
+ * new CompressorStreamFactory().createCompressorInputStream(CompressorStreamFactory.BZIP2, is);
+ * IOUtils.copy(in, new FileOutputStream(output));
+ * in.close();
+ *
+ *
+ * @Immutable
+ */
+public class CompressorStreamFactory {
+
+ /**
+ * Constant used to identify the BZIP2 compression algorithm.
+ * @since 1.1
+ */
+ public static final String BZIP2 = "bzip2";
+
+ /**
+ * Constant used to identify the GZIP compression algorithm.
+ * @since 1.1
+ */
+ public static final String GZIP = "gz";
+ /**
+ * Constant used to identify the PACK200 compression algorithm.
+ * @since 1.3
+ */
+ public static final String PACK200 = "pack200";
+
+ /**
+ * Constant used to identify the XZ compression method.
+ * @since 1.4
+ */
+ public static final String XZ = "xz";
+
+ /**
+ * Constant used to identify the LZMA compression method.
+ * @since 1.6
+ */
+ public static final String LZMA = "lzma";
+
+ /**
+ * Constant used to identify the "framed" Snappy compression method.
+ * @since 1.7
+ */
+ public static final String SNAPPY_FRAMED = "snappy-framed";
+
+ /**
+ * Constant used to identify the "raw" Snappy compression method.
+ * @since 1.7
+ */
+ public static final String SNAPPY_RAW = "snappy-raw";
+
+ /**
+ * Constant used to identify the traditional Unix compress method.
+ * @since 1.7
+ */
+ public static final String Z = "z";
+
+ private boolean decompressConcatenated = false;
+
+ /**
+ * Whether to decompress the full input or only the first stream
+ * in formats supporting multiple concatenated input streams.
+ *
+ * This setting applies to the gzip, bzip2 and xz formats only.
+ *
+ * @param decompressConcatenated
+ * if true, decompress until the end of the
+ * input; if false, stop after the first
+ * stream and leave the input position to point
+ * to the next byte after the stream
+ * @since 1.5
+ */
+ public void setDecompressConcatenated(boolean decompressConcatenated) {
+ this.decompressConcatenated = decompressConcatenated;
+ }
+
+ /**
+ * Create a compressor input stream from an input stream, autodetecting
+ * the compressor type from the first few bytes of the stream. The InputStream
+ * must support marks, like BufferedInputStream.
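+ *
+ * A minimal sketch (input is a hypothetical local file; the buffering
+ * wrapper provides the required mark support):
+ * <pre>
+ * InputStream is = new BufferedInputStream(new FileInputStream(input));
+ * CompressorInputStream cis =
+ * new CompressorStreamFactory().createCompressorInputStream(is);
+ * </pre>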
+ *
+ * @param in the input stream
+ * @return the compressor input stream
+ * @throws CompressorException if the compressor name is not known
+ * @throws IllegalArgumentException if the stream is null or does not support mark
+ * @since 1.1
+ */
+ public CompressorInputStream createCompressorInputStream(final InputStream in)
+ throws CompressorException {
+ if (in == null) {
+ throw new IllegalArgumentException("Stream must not be null.");
+ }
+
+ if (!in.markSupported()) {
+ throw new IllegalArgumentException("Mark is not supported.");
+ }
+
+ final byte[] signature = new byte[12];
+ in.mark(signature.length);
+ try {
+ int signatureLength = IOUtils.readFully(in, signature);
+ in.reset();
+
+ if (BZip2CompressorInputStream.matches(signature, signatureLength)) {
+ return new BZip2CompressorInputStream(in, decompressConcatenated);
+ }
+
+ if (GzipCompressorInputStream.matches(signature, signatureLength)) {
+ return new GzipCompressorInputStream(in, decompressConcatenated);
+ }
+
+
+ if (Pack200CompressorInputStream.matches(signature, signatureLength)) {
+ return new Pack200CompressorInputStream(in);
+ }
+
+ if (FramedSnappyCompressorInputStream.matches(signature, signatureLength)) {
+ return new FramedSnappyCompressorInputStream(in);
+ }
+
+ } catch (IOException e) {
+ throw new CompressorException("Failed to detect Compressor from InputStream.", e);
+ }
+
+ throw new CompressorException("No Compressor found for the stream signature.");
+ }
+
+ /**
+ * Create a compressor input stream from a compressor name and an input stream.
+ *
+ * @param name of the compressor, e.g. "gz", "bzip2", "xz",
+ * "lzma", "snappy-raw", "snappy-framed", "pack200", "z"
+ * @param in the input stream
+ * @return compressor input stream
+ * @throws CompressorException if the compressor name is not known
+ * @throws IllegalArgumentException if the name or input stream is null
+ */
+ public CompressorInputStream createCompressorInputStream(final String name,
+ final InputStream in) throws CompressorException {
+ if (name == null || in == null) {
+ throw new IllegalArgumentException(
+ "Compressor name and stream must not be null.");
+ }
+
+ try {
+
+ if (GZIP.equalsIgnoreCase(name)) {
+ return new GzipCompressorInputStream(in);
+ }
+
+ if (BZIP2.equalsIgnoreCase(name)) {
+ return new BZip2CompressorInputStream(in);
+ }
+
+ if (PACK200.equalsIgnoreCase(name)) {
+ return new Pack200CompressorInputStream(in);
+ }
+
+ if (SNAPPY_RAW.equalsIgnoreCase(name)) {
+ return new SnappyCompressorInputStream(in);
+ }
+
+ if (SNAPPY_FRAMED.equalsIgnoreCase(name)) {
+ return new FramedSnappyCompressorInputStream(in);
+ }
+
+ if (Z.equalsIgnoreCase(name)) {
+ return new ZCompressorInputStream(in);
+ }
+
+ } catch (IOException e) {
+ throw new CompressorException(
+ "Could not create CompressorInputStream.", e);
+ }
+ throw new CompressorException("Compressor: " + name + " not found.");
+ }
+
+ /**
+ * Create a compressor output stream from a compressor name and an output stream.
+ *
+ * @param name the compressor name, e.g. "gz", "bzip2", "xz", or "pack200"
+ * @param out the output stream
+ * @return the compressor output stream
+ * @throws CompressorException if the archiver name is not known
+ * @throws IllegalArgumentException if the archiver name or stream is null
+ */
+ public CompressorOutputStream createCompressorOutputStream(
+ final String name, final OutputStream out)
+ throws CompressorException {
+ if (name == null || out == null) {
+ throw new IllegalArgumentException(
+ "Compressor name and stream must not be null.");
+ }
+
+ try {
+
+ if (GZIP.equalsIgnoreCase(name)) {
+ return new GzipCompressorOutputStream(out);
+ }
+
+ if (BZIP2.equalsIgnoreCase(name)) {
+ return new BZip2CompressorOutputStream(out);
+ }
+
+ if (PACK200.equalsIgnoreCase(name)) {
+ return new Pack200CompressorOutputStream(out);
+ }
+
+ } catch (IOException e) {
+ throw new CompressorException(
+ "Could not create CompressorOutputStream", e);
+ }
+ throw new CompressorException("Compressor: " + name + " not found.");
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/FileNameUtil.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/FileNameUtil.java
new file mode 100644
index 000000000..6accafd4a
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/FileNameUtil.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * File name mapping code for the compression formats.
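+ *
+ * A hypothetical gzip-style setup (this suffix map is invented for
+ * illustration; it is not shipped by this class):
+ * <pre>
+ * Map&lt;String, String&gt; suffixes = new HashMap&lt;String, String&gt;();
+ * suffixes.put(".tgz", ".tar");
+ * suffixes.put(".gz", "");
+ * FileNameUtil util = new FileNameUtil(suffixes, ".gz");
+ * util.getUncompressedFilename("package.tgz"); // yields "package.tar"
+ * </pre>
+ *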
+ * @ThreadSafe
+ * @since 1.4
+ */
+public class FileNameUtil {
+
+ /**
+ * Map from common filename suffixes to the suffixes that identify compressed
+ * versions of those file types. For example: from ".tar" to ".tgz".
+ */
+ private final Map<String, String> compressSuffix =
+ new HashMap<String, String>();
+
+ /**
+ * Map from common filename suffixes of compressed files to the
+ * corresponding suffixes of uncompressed files. For example: from
+ * ".tgz" to ".tar".
+ *
+ * This map also contains format-specific suffixes like ".gz" and "-z".
+ * These suffixes are mapped to the empty string, as they should simply
+ * be removed from the filename when the file is uncompressed.
+ */
+ private final Map<String, String> uncompressSuffix;
+
+ /**
+ * Length of the longest compressed suffix.
+ */
+ private final int longestCompressedSuffix;
+
+ /**
+ * Length of the shortest compressed suffix.
+ */
+ private final int shortestCompressedSuffix;
+
+ /**
+ * Length of the longest uncompressed suffix.
+ */
+ private final int longestUncompressedSuffix;
+
+ /**
+ * Length of the shortest uncompressed suffix longer than the
+ * empty string.
+ */
+ private final int shortestUncompressedSuffix;
+
+ /**
+ * The format's default extension.
+ */
+ private final String defaultExtension;
+
+ /**
+ * Sets up the utility with a map of known compressed to
+ * uncompressed suffix mappings and the default extension of the
+ * format.
+ *
+ * @param uncompressSuffix Map from common filename suffixes of
+ * compressed files to the corresponding suffixes of uncompressed
+ * files. For example: from ".tgz" to ".tar". This map also
+ * contains format-specific suffixes like ".gz" and "-z". These
+ * suffixes are mapped to the empty string, as they should simply
+ * be removed from the filename when the file is uncompressed.
+ *
+ * @param defaultExtension the format's default extension like ".gz"
+ */
+ public FileNameUtil(Map<String, String> uncompressSuffix,
+ String defaultExtension) {
+ this.uncompressSuffix = Collections.unmodifiableMap(uncompressSuffix);
+ int lc = Integer.MIN_VALUE, sc = Integer.MAX_VALUE;
+ int lu = Integer.MIN_VALUE, su = Integer.MAX_VALUE;
+ for (Map.Entry<String, String> ent : uncompressSuffix.entrySet()) {
+ int cl = ent.getKey().length();
+ if (cl > lc) {
+ lc = cl;
+ }
+ if (cl < sc) {
+ sc = cl;
+ }
+
+ String u = ent.getValue();
+ int ul = u.length();
+ if (ul > 0) {
+ if (!compressSuffix.containsKey(u)) {
+ compressSuffix.put(u, ent.getKey());
+ }
+ if (ul > lu) {
+ lu = ul;
+ }
+ if (ul < su) {
+ su = ul;
+ }
+ }
+ }
+ longestCompressedSuffix = lc;
+ longestUncompressedSuffix = lu;
+ shortestCompressedSuffix = sc;
+ shortestUncompressedSuffix = su;
+ this.defaultExtension = defaultExtension;
+ }
+
+ /**
+ * Detects common format suffixes in the given filename.
+ *
+ * @param filename name of a file
+ * @return {@code true} if the filename has a common format suffix,
+ * {@code false} otherwise
+ */
+ public boolean isCompressedFilename(String filename) {
+ final String lower = filename.toLowerCase(Locale.ENGLISH);
+ final int n = lower.length();
+ for (int i = shortestCompressedSuffix;
+ i <= longestCompressedSuffix && i < n; i++) {
+ if (uncompressSuffix.containsKey(lower.substring(n - i))) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Maps the given name of a compressed file to the name that the
+ * file should have after uncompression. Commonly used file type specific
+ * suffixes like ".tgz" or ".svgz" are automatically detected and
+ * correctly mapped. For example the name "package.tgz" is mapped to
+ * "package.tar". And any filenames with the generic ".gz" suffix
+ * (or any other generic gzip suffix) is mapped to a name without that
+ * suffix. If no format suffix is detected, then the filename is returned
+ * unmapped.
+ *
+ * @param filename name of a file
+ * @return name of the corresponding uncompressed file
+ */
+ public String getUncompressedFilename(String filename) {
+ final String lower = filename.toLowerCase(Locale.ENGLISH);
+ final int n = lower.length();
+ for (int i = shortestCompressedSuffix;
+ i <= longestCompressedSuffix && i < n; i++) {
+ String suffix = uncompressSuffix.get(lower.substring(n - i));
+ if (suffix != null) {
+ return filename.substring(0, n - i) + suffix;
+ }
+ }
+ return filename;
+ }
+
+ /**
+ * Maps the given filename to the name that the file should have after
+ * compression. Common file types with custom suffixes for
+ * compressed versions are automatically detected and correctly mapped.
+ * For example the name "package.tar" is mapped to "package.tgz". If no
+ * custom mapping is applicable, then the default ".gz" suffix is appended
+ * to the filename.
+ *
+ * @param filename name of a file
+ * @return name of the corresponding compressed file
+ */
+ public String getCompressedFilename(String filename) {
+ final String lower = filename.toLowerCase(Locale.ENGLISH);
+ final int n = lower.length();
+ for (int i = shortestUncompressedSuffix;
+ i <= longestUncompressedSuffix && i < n; i++) {
+ String suffix = compressSuffix.get(lower.substring(n - i));
+ if (suffix != null) {
+ return filename.substring(0, n - i) + suffix;
+ }
+ }
+ // No custom suffix found, just append the default
+ return filename + defaultExtension;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorInputStream.java
new file mode 100644
index 000000000..1785d9405
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorInputStream.java
@@ -0,0 +1,1046 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * to whom the Ant project is very grateful for his
+ * great code.
+ */
+package org.apache.commons.compress.compressors.bzip2;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.compressors.CompressorInputStream;
+
+/**
+ * An input stream that decompresses from the BZip2 format to be read as any other stream.
+ *
+ * @NotThreadSafe
+ */
+public class BZip2CompressorInputStream extends CompressorInputStream implements
+ BZip2Constants {
+
+ /**
+ * Index of the last char in the block, so the block size == last + 1.
+ */
+ private int last;
+
+ /**
+ * Index in zptr[] of original string after sorting.
+ */
+ private int origPtr;
+
+ /**
+ * always: in the range 0 .. 9. The current block size is 100000 * this
+ * number.
+ */
+ private int blockSize100k;
+
+ private boolean blockRandomised;
+
+ private int bsBuff;
+ private int bsLive;
+ private final CRC crc = new CRC();
+
+ private int nInUse;
+
+ private InputStream in;
+ private final boolean decompressConcatenated;
+
+ private int currentChar = -1;
+
+ private static final int EOF = 0;
+ private static final int START_BLOCK_STATE = 1;
+ private static final int RAND_PART_A_STATE = 2;
+ private static final int RAND_PART_B_STATE = 3;
+ private static final int RAND_PART_C_STATE = 4;
+ private static final int NO_RAND_PART_A_STATE = 5;
+ private static final int NO_RAND_PART_B_STATE = 6;
+ private static final int NO_RAND_PART_C_STATE = 7;
+
+ private int currentState = START_BLOCK_STATE;
+
+ private int storedBlockCRC, storedCombinedCRC;
+ private int computedBlockCRC, computedCombinedCRC;
+
+ // Variables used by setup* methods exclusively
+
+ private int su_count;
+ private int su_ch2;
+ private int su_chPrev;
+ private int su_i2;
+ private int su_j2;
+ private int su_rNToGo;
+ private int su_rTPos;
+ private int su_tPos;
+ private char su_z;
+
+ /**
+ * All memory intensive stuff. This field is initialized by initBlock().
+ */
+ private BZip2CompressorInputStream.Data data;
+
+ /**
+ * Constructs a new BZip2CompressorInputStream which decompresses bytes
+ * read from the specified stream. This doesn't support decompressing
+ * concatenated .bz2 files.
+ *
+ * @throws IOException
+ * if the stream content is malformed or an I/O error occurs.
+ * @throws NullPointerException
+ * if in == null
+ */
+ public BZip2CompressorInputStream(final InputStream in) throws IOException {
+ this(in, false);
+ }
+
+ /**
+ * Constructs a new BZip2CompressorInputStream which decompresses bytes
+ * read from the specified stream.
+ *
+ * @param in the InputStream from which this object should be created
+ * @param decompressConcatenated
+ * if true, decompress until the end of the input;
+ * if false, stop after the first .bz2 stream and
+ * leave the input position to point to the next
+ * byte after the .bz2 stream
+ *
+ * @throws IOException
+ * if the stream content is malformed or an I/O error occurs.
+ * @throws NullPointerException
+ * if in == null
+ */
+ public BZip2CompressorInputStream(final InputStream in, final boolean decompressConcatenated) throws IOException {
+ this.in = in;
+ this.decompressConcatenated = decompressConcatenated;
+
+ init(true);
+ initBlock();
+ setupBlock();
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (this.in != null) {
+ int r = read0();
+ count(r < 0 ? -1 : 1);
+ return r;
+ } else {
+ throw new IOException("stream closed");
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.io.InputStream#read(byte[], int, int)
+ */
+ @Override
+ public int read(final byte[] dest, final int offs, final int len)
+ throws IOException {
+ if (offs < 0) {
+ throw new IndexOutOfBoundsException("offs(" + offs + ") < 0.");
+ }
+ if (len < 0) {
+ throw new IndexOutOfBoundsException("len(" + len + ") < 0.");
+ }
+ if (offs + len > dest.length) {
+ throw new IndexOutOfBoundsException("offs(" + offs + ") + len("
+ + len + ") > dest.length(" + dest.length + ").");
+ }
+ if (this.in == null) {
+ throw new IOException("stream closed");
+ }
+
+ final int hi = offs + len;
+ int destOffs = offs;
+ for (int b; (destOffs < hi) && ((b = read0()) >= 0);) {
+ dest[destOffs++] = (byte) b;
+ }
+
+ int c = (destOffs == offs) ? -1 : (destOffs - offs);
+ count(c);
+ return c;
+ }
+
+ private void makeMaps() {
+ final boolean[] inUse = this.data.inUse;
+ final byte[] seqToUnseq = this.data.seqToUnseq;
+
+ int nInUseShadow = 0;
+
+ for (int i = 0; i < 256; i++) {
+ if (inUse[i]) {
+ seqToUnseq[nInUseShadow++] = (byte) i;
+ }
+ }
+
+ this.nInUse = nInUseShadow;
+ }
+
+ private int read0() throws IOException {
+ final int retChar = this.currentChar;
+
+ switch (this.currentState) {
+ case EOF:
+ return -1;
+
+ case START_BLOCK_STATE:
+ throw new IllegalStateException();
+
+ case RAND_PART_A_STATE:
+ throw new IllegalStateException();
+
+ case RAND_PART_B_STATE:
+ setupRandPartB();
+ break;
+
+ case RAND_PART_C_STATE:
+ setupRandPartC();
+ break;
+
+ case NO_RAND_PART_A_STATE:
+ throw new IllegalStateException();
+
+ case NO_RAND_PART_B_STATE:
+ setupNoRandPartB();
+ break;
+
+ case NO_RAND_PART_C_STATE:
+ setupNoRandPartC();
+ break;
+
+ default:
+ throw new IllegalStateException();
+ }
+
+ return retChar;
+ }
+
+ private boolean init(boolean isFirstStream) throws IOException {
+ if (null == in) {
+ throw new IOException("No InputStream");
+ }
+
+ int magic0 = this.in.read();
+ if (magic0 == -1 && !isFirstStream) {
+ return false;
+ }
+ int magic1 = this.in.read();
+ int magic2 = this.in.read();
+
+ if (magic0 != 'B' || magic1 != 'Z' || magic2 != 'h') {
+ throw new IOException(isFirstStream
+ ? "Stream is not in the BZip2 format"
+ : "Garbage after a valid BZip2 stream");
+ }
+
+ int blockSize = this.in.read();
+ if ((blockSize < '1') || (blockSize > '9')) {
+ throw new IOException("BZip2 block size is invalid");
+ }
+
+ this.blockSize100k = blockSize - '0';
+
+ this.bsLive = 0;
+ this.computedCombinedCRC = 0;
+
+ return true;
+ }
+
+ private void initBlock() throws IOException {
+ char magic0;
+ char magic1;
+ char magic2;
+ char magic3;
+ char magic4;
+ char magic5;
+
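+ // Each compressed block starts with the 48-bit magic
+ // 0x314159265359 (the BCD digits of pi); the end-of-stream
+ // record starts with 0x177245385090 (the BCD digits of
+ // sqrt(pi)).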
+ while (true) {
+ // Get the block magic bytes.
+ magic0 = bsGetUByte();
+ magic1 = bsGetUByte();
+ magic2 = bsGetUByte();
+ magic3 = bsGetUByte();
+ magic4 = bsGetUByte();
+ magic5 = bsGetUByte();
+
+ // If this isn't the end-of-stream magic, break out of the loop.
+ if (magic0 != 0x17 || magic1 != 0x72 || magic2 != 0x45
+ || magic3 != 0x38 || magic4 != 0x50 || magic5 != 0x90) {
+ break;
+ }
+
+ // End of stream was reached. Check the combined CRC and
+ // advance to the next .bz2 stream if decoding concatenated
+ // streams.
+ if (complete()) {
+ return;
+ }
+ }
+
+ if (magic0 != 0x31 || // '1'
+ magic1 != 0x41 || // 'A'
+ magic2 != 0x59 || // 'Y'
+ magic3 != 0x26 || // '&'
+ magic4 != 0x53 || // 'S'
+ magic5 != 0x59 // 'Y'
+ ) {
+ this.currentState = EOF;
+ throw new IOException("bad block header");
+ } else {
+ this.storedBlockCRC = bsGetInt();
+ this.blockRandomised = bsR(1) == 1;
+
+ /**
+ * Allocate data here instead of in the constructor, so we do not
+ * allocate it if the input file is empty.
+ */
+ if (this.data == null) {
+ this.data = new Data(this.blockSize100k);
+ }
+
+ // currBlockNo++;
+ getAndMoveToFrontDecode();
+
+ this.crc.initialiseCRC();
+ this.currentState = START_BLOCK_STATE;
+ }
+ }
+
+ private void endBlock() throws IOException {
+ this.computedBlockCRC = this.crc.getFinalCRC();
+
+ // A bad CRC is considered a fatal error.
+ if (this.storedBlockCRC != this.computedBlockCRC) {
+ // make next blocks readable without error
+ // (repair feature, not yet documented, not tested)
+ this.computedCombinedCRC = (this.storedCombinedCRC << 1)
+ | (this.storedCombinedCRC >>> 31);
+ this.computedCombinedCRC ^= this.storedBlockCRC;
+
+ throw new IOException("BZip2 CRC error");
+ }
+
+ this.computedCombinedCRC = (this.computedCombinedCRC << 1)
+ | (this.computedCombinedCRC >>> 31);
+ this.computedCombinedCRC ^= this.computedBlockCRC;
+ }
+
+ private boolean complete() throws IOException {
+ this.storedCombinedCRC = bsGetInt();
+ this.currentState = EOF;
+ this.data = null;
+
+ if (this.storedCombinedCRC != this.computedCombinedCRC) {
+ throw new IOException("BZip2 CRC error");
+ }
+
+ // Look for the next .bz2 stream if decompressing
+ // concatenated files.
+ return !decompressConcatenated || !init(false);
+ }
+
+ @Override
+ public void close() throws IOException {
+ InputStream inShadow = this.in;
+ if (inShadow != null) {
+ try {
+ if (inShadow != System.in) {
+ inShadow.close();
+ }
+ } finally {
+ this.data = null;
+ this.in = null;
+ }
+ }
+ }
+
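+ /**
+ * Reads the next n bits from the bit buffer, refilling bsBuff from
+ * the underlying stream one byte at a time as needed; bsLive tracks
+ * how many buffered bits remain unread.
+ */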
+ private int bsR(final int n) throws IOException {
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ if (bsLiveShadow < n) {
+ final InputStream inShadow = this.in;
+ do {
+ int thech = inShadow.read();
+
+ if (thech < 0) {
+ throw new IOException("unexpected end of stream");
+ }
+
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ } while (bsLiveShadow < n);
+
+ this.bsBuff = bsBuffShadow;
+ }
+
+ this.bsLive = bsLiveShadow - n;
+ return (bsBuffShadow >> (bsLiveShadow - n)) & ((1 << n) - 1);
+ }
+
+ private boolean bsGetBit() throws IOException {
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ if (bsLiveShadow < 1) {
+ int thech = this.in.read();
+
+ if (thech < 0) {
+ throw new IOException("unexpected end of stream");
+ }
+
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ this.bsBuff = bsBuffShadow;
+ }
+
+ this.bsLive = bsLiveShadow - 1;
+ return ((bsBuffShadow >> (bsLiveShadow - 1)) & 1) != 0;
+ }
+
+ private char bsGetUByte() throws IOException {
+ return (char) bsR(8);
+ }
+
+ private int bsGetInt() throws IOException {
+ return (((((bsR(8) << 8) | bsR(8)) << 8) | bsR(8)) << 8) | bsR(8);
+ }
+
+ /**
+ * Called by createHuffmanDecodingTables() exclusively.
+ */
+ private static void hbCreateDecodeTables(final int[] limit,
+ final int[] base, final int[] perm, final char[] length,
+ final int minLen, final int maxLen, final int alphaSize) {
+ for (int i = minLen, pp = 0; i <= maxLen; i++) {
+ for (int j = 0; j < alphaSize; j++) {
+ if (length[j] == i) {
+ perm[pp++] = j;
+ }
+ }
+ }
+
+ for (int i = MAX_CODE_LEN; --i > 0;) {
+ base[i] = 0;
+ limit[i] = 0;
+ }
+
+ for (int i = 0; i < alphaSize; i++) {
+ base[length[i] + 1]++;
+ }
+
+ for (int i = 1, b = base[0]; i < MAX_CODE_LEN; i++) {
+ b += base[i];
+ base[i] = b;
+ }
+
+ for (int i = minLen, vec = 0, b = base[i]; i <= maxLen; i++) {
+ final int nb = base[i + 1];
+ vec += nb - b;
+ b = nb;
+ limit[i] = vec - 1;
+ vec <<= 1;
+ }
+
+ for (int i = minLen + 1; i <= maxLen; i++) {
+ base[i] = ((limit[i - 1] + 1) << 1) - base[i];
+ }
+ }
+
+ private void recvDecodingTables() throws IOException {
+ final Data dataShadow = this.data;
+ final boolean[] inUse = dataShadow.inUse;
+ final byte[] pos = dataShadow.recvDecodingTables_pos;
+ final byte[] selector = dataShadow.selector;
+ final byte[] selectorMtf = dataShadow.selectorMtf;
+
+ int inUse16 = 0;
+
+ /* Receive the mapping table */
+ for (int i = 0; i < 16; i++) {
+ if (bsGetBit()) {
+ inUse16 |= 1 << i;
+ }
+ }
+
+ for (int i = 256; --i >= 0;) {
+ inUse[i] = false;
+ }
+
+ for (int i = 0; i < 16; i++) {
+ if ((inUse16 & (1 << i)) != 0) {
+ final int i16 = i << 4;
+ for (int j = 0; j < 16; j++) {
+ if (bsGetBit()) {
+ inUse[i16 + j] = true;
+ }
+ }
+ }
+ }
+
+ makeMaps();
+ final int alphaSize = this.nInUse + 2;
+
+ /* Now the selectors */
+ final int nGroups = bsR(3);
+ final int nSelectors = bsR(15);
+
+ for (int i = 0; i < nSelectors; i++) {
+ int j = 0;
+ while (bsGetBit()) {
+ j++;
+ }
+ selectorMtf[i] = (byte) j;
+ }
+
+ /* Undo the MTF values for the selectors. */
+ for (int v = nGroups; --v >= 0;) {
+ pos[v] = (byte) v;
+ }
+
+ for (int i = 0; i < nSelectors; i++) {
+ int v = selectorMtf[i] & 0xff;
+ final byte tmp = pos[v];
+ while (v > 0) {
+ // nearly all times v is zero, 4 in most other cases
+ pos[v] = pos[v - 1];
+ v--;
+ }
+ pos[0] = tmp;
+ selector[i] = tmp;
+ }
+
+ final char[][] len = dataShadow.temp_charArray2d;
+
+ /* Now the coding tables */
+ for (int t = 0; t < nGroups; t++) {
+ int curr = bsR(5);
+ final char[] len_t = len[t];
+ for (int i = 0; i < alphaSize; i++) {
+ while (bsGetBit()) {
+ curr += bsGetBit() ? -1 : 1;
+ }
+ len_t[i] = (char) curr;
+ }
+ }
+
+ // finally create the Huffman tables
+ createHuffmanDecodingTables(alphaSize, nGroups);
+ }
+
+ /**
+ * Called by recvDecodingTables() exclusively.
+ */
+ private void createHuffmanDecodingTables(final int alphaSize,
+ final int nGroups) {
+ final Data dataShadow = this.data;
+ final char[][] len = dataShadow.temp_charArray2d;
+ final int[] minLens = dataShadow.minLens;
+ final int[][] limit = dataShadow.limit;
+ final int[][] base = dataShadow.base;
+ final int[][] perm = dataShadow.perm;
+
+ for (int t = 0; t < nGroups; t++) {
+ int minLen = 32;
+ int maxLen = 0;
+ final char[] len_t = len[t];
+ for (int i = alphaSize; --i >= 0;) {
+ final char lent = len_t[i];
+ if (lent > maxLen) {
+ maxLen = lent;
+ }
+ if (lent < minLen) {
+ minLen = lent;
+ }
+ }
+ hbCreateDecodeTables(limit[t], base[t], perm[t], len[t], minLen,
+ maxLen, alphaSize);
+ minLens[t] = minLen;
+ }
+ }
+
+ private void getAndMoveToFrontDecode() throws IOException {
+ this.origPtr = bsR(24);
+ recvDecodingTables();
+
+ final InputStream inShadow = this.in;
+ final Data dataShadow = this.data;
+ final byte[] ll8 = dataShadow.ll8;
+ final int[] unzftab = dataShadow.unzftab;
+ final byte[] selector = dataShadow.selector;
+ final byte[] seqToUnseq = dataShadow.seqToUnseq;
+ final char[] yy = dataShadow.getAndMoveToFrontDecode_yy;
+ final int[] minLens = dataShadow.minLens;
+ final int[][] limit = dataShadow.limit;
+ final int[][] base = dataShadow.base;
+ final int[][] perm = dataShadow.perm;
+ final int limitLast = this.blockSize100k * 100000;
+
+ /*
+ * Setting up the unzftab entries here is not strictly necessary, but it
+ * does save having to do it later in a separate pass, and so saves a
+ * block's worth of cache misses.
+ */
+ for (int i = 256; --i >= 0;) {
+ yy[i] = (char) i;
+ unzftab[i] = 0;
+ }
+
+ int groupNo = 0;
+ int groupPos = G_SIZE - 1;
+ final int eob = this.nInUse + 1;
+ int nextSym = getAndMoveToFrontDecode0(0);
+ int bsBuffShadow = this.bsBuff;
+ int bsLiveShadow = this.bsLive;
+ int lastShadow = -1;
+ int zt = selector[groupNo] & 0xff;
+ int[] base_zt = base[zt];
+ int[] limit_zt = limit[zt];
+ int[] perm_zt = perm[zt];
+ int minLens_zt = minLens[zt];
+
+ while (nextSym != eob) {
+ if ((nextSym == RUNA) || (nextSym == RUNB)) {
+ int s = -1;
+
+ for (int n = 1; true; n <<= 1) {
+ if (nextSym == RUNA) {
+ s += n;
+ } else if (nextSym == RUNB) {
+ s += n << 1;
+ } else {
+ break;
+ }
+
+ if (groupPos == 0) {
+ groupPos = G_SIZE - 1;
+ zt = selector[++groupNo] & 0xff;
+ base_zt = base[zt];
+ limit_zt = limit[zt];
+ perm_zt = perm[zt];
+ minLens_zt = minLens[zt];
+ } else {
+ groupPos--;
+ }
+
+ int zn = minLens_zt;
+
+ // Inlined:
+ // int zvec = bsR(zn);
+ while (bsLiveShadow < zn) {
+ final int thech = inShadow.read();
+ if (thech >= 0) {
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ continue;
+ } else {
+ throw new IOException("unexpected end of stream");
+ }
+ }
+ int zvec = (bsBuffShadow >> (bsLiveShadow - zn))
+ & ((1 << zn) - 1);
+ bsLiveShadow -= zn;
+
+ while (zvec > limit_zt[zn]) {
+ zn++;
+ while (bsLiveShadow < 1) {
+ final int thech = inShadow.read();
+ if (thech >= 0) {
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ continue;
+ } else {
+ throw new IOException(
+ "unexpected end of stream");
+ }
+ }
+ bsLiveShadow--;
+ zvec = (zvec << 1)
+ | ((bsBuffShadow >> bsLiveShadow) & 1);
+ }
+ nextSym = perm_zt[zvec - base_zt[zn]];
+ }
+
+ final byte ch = seqToUnseq[yy[0]];
+ unzftab[ch & 0xff] += s + 1;
+
+ while (s-- >= 0) {
+ ll8[++lastShadow] = ch;
+ }
+
+ if (lastShadow >= limitLast) {
+ throw new IOException("block overrun");
+ }
+ } else {
+ if (++lastShadow >= limitLast) {
+ throw new IOException("block overrun");
+ }
+
+ final char tmp = yy[nextSym - 1];
+ unzftab[seqToUnseq[tmp] & 0xff]++;
+ ll8[lastShadow] = seqToUnseq[tmp];
+
+ /*
+ * This loop is hammered during decompression, hence avoid
+ * native method call overhead of System.arraycopy for very
+ * small ranges to copy.
+ */
+ if (nextSym <= 16) {
+ for (int j = nextSym - 1; j > 0;) {
+ yy[j] = yy[--j];
+ }
+ } else {
+ System.arraycopy(yy, 0, yy, 1, nextSym - 1);
+ }
+
+ yy[0] = tmp;
+
+ if (groupPos == 0) {
+ groupPos = G_SIZE - 1;
+ zt = selector[++groupNo] & 0xff;
+ base_zt = base[zt];
+ limit_zt = limit[zt];
+ perm_zt = perm[zt];
+ minLens_zt = minLens[zt];
+ } else {
+ groupPos--;
+ }
+
+ int zn = minLens_zt;
+
+ // Inlined:
+ // int zvec = bsR(zn);
+ while (bsLiveShadow < zn) {
+ final int thech = inShadow.read();
+ if (thech >= 0) {
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ continue;
+ } else {
+ throw new IOException("unexpected end of stream");
+ }
+ }
+ int zvec = (bsBuffShadow >> (bsLiveShadow - zn))
+ & ((1 << zn) - 1);
+ bsLiveShadow -= zn;
+
+ while (zvec > limit_zt[zn]) {
+ zn++;
+ while (bsLiveShadow < 1) {
+ final int thech = inShadow.read();
+ if (thech >= 0) {
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ continue;
+ } else {
+ throw new IOException("unexpected end of stream");
+ }
+ }
+ bsLiveShadow--;
+ zvec = (zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1);
+ }
+ nextSym = perm_zt[zvec - base_zt[zn]];
+ }
+ }
+
+ this.last = lastShadow;
+ this.bsLive = bsLiveShadow;
+ this.bsBuff = bsBuffShadow;
+ }
+
+ private int getAndMoveToFrontDecode0(final int groupNo) throws IOException {
+ final InputStream inShadow = this.in;
+ final Data dataShadow = this.data;
+ final int zt = dataShadow.selector[groupNo] & 0xff;
+ final int[] limit_zt = dataShadow.limit[zt];
+ int zn = dataShadow.minLens[zt];
+ int zvec = bsR(zn);
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ while (zvec > limit_zt[zn]) {
+ zn++;
+ while (bsLiveShadow < 1) {
+ final int thech = inShadow.read();
+
+ if (thech >= 0) {
+ bsBuffShadow = (bsBuffShadow << 8) | thech;
+ bsLiveShadow += 8;
+ continue;
+ } else {
+ throw new IOException("unexpected end of stream");
+ }
+ }
+ bsLiveShadow--;
+ zvec = (zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1);
+ }
+
+ this.bsLive = bsLiveShadow;
+ this.bsBuff = bsBuffShadow;
+
+ return dataShadow.perm[zt][zvec - dataShadow.base[zt][zn]];
+ }
+
+ private void setupBlock() throws IOException {
+ if (this.data == null) {
+ return;
+ }
+
+ final int[] cftab = this.data.cftab;
+ final int[] tt = this.data.initTT(this.last + 1);
+ final byte[] ll8 = this.data.ll8;
+ cftab[0] = 0;
+ System.arraycopy(this.data.unzftab, 0, cftab, 1, 256);
+
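+ // Make cftab cumulative: after this loop cftab[i] holds the number
+ // of bytes in the block whose value is less than i.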
+ for (int i = 1, c = cftab[0]; i <= 256; i++) {
+ c += cftab[i];
+ cftab[i] = c;
+ }
+
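+ // Build the transform vector tt, which the setup*Part* methods walk
+ // to emit the block in original (pre-BWT) order.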
+ for (int i = 0, lastShadow = this.last; i <= lastShadow; i++) {
+ tt[cftab[ll8[i] & 0xff]++] = i;
+ }
+
+ if ((this.origPtr < 0) || (this.origPtr >= tt.length)) {
+ throw new IOException("stream corrupted");
+ }
+
+ this.su_tPos = tt[this.origPtr];
+ this.su_count = 0;
+ this.su_i2 = 0;
+ this.su_ch2 = 256; /* not a char and not EOF */
+
+ if (this.blockRandomised) {
+ this.su_rNToGo = 0;
+ this.su_rTPos = 0;
+ setupRandPartA();
+ } else {
+ setupNoRandPartA();
+ }
+ }
+
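+ // The *RandPart* methods below undo the optional block randomisation
+ // of very old bzip2 streams, using the fixed number table in Rand.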
+ private void setupRandPartA() throws IOException {
+ if (this.su_i2 <= this.last) {
+ this.su_chPrev = this.su_ch2;
+ int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff;
+ this.su_tPos = this.data.tt[this.su_tPos];
+ if (this.su_rNToGo == 0) {
+ this.su_rNToGo = Rand.rNums(this.su_rTPos) - 1;
+ if (++this.su_rTPos == 512) {
+ this.su_rTPos = 0;
+ }
+ } else {
+ this.su_rNToGo--;
+ }
+ this.su_ch2 = su_ch2Shadow ^= (this.su_rNToGo == 1) ? 1 : 0;
+ this.su_i2++;
+ this.currentChar = su_ch2Shadow;
+ this.currentState = RAND_PART_B_STATE;
+ this.crc.updateCRC(su_ch2Shadow);
+ } else {
+ endBlock();
+ initBlock();
+ setupBlock();
+ }
+ }
+
+ private void setupNoRandPartA() throws IOException {
+ if (this.su_i2 <= this.last) {
+ this.su_chPrev = this.su_ch2;
+ int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff;
+ this.su_ch2 = su_ch2Shadow;
+ this.su_tPos = this.data.tt[this.su_tPos];
+ this.su_i2++;
+ this.currentChar = su_ch2Shadow;
+ this.currentState = NO_RAND_PART_B_STATE;
+ this.crc.updateCRC(su_ch2Shadow);
+ } else {
+ this.currentState = NO_RAND_PART_A_STATE;
+ endBlock();
+ initBlock();
+ setupBlock();
+ }
+ }
+
+ private void setupRandPartB() throws IOException {
+ if (this.su_ch2 != this.su_chPrev) {
+ this.currentState = RAND_PART_A_STATE;
+ this.su_count = 1;
+ setupRandPartA();
+ } else if (++this.su_count >= 4) {
+ this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff);
+ this.su_tPos = this.data.tt[this.su_tPos];
+ if (this.su_rNToGo == 0) {
+ this.su_rNToGo = Rand.rNums(this.su_rTPos) - 1;
+ if (++this.su_rTPos == 512) {
+ this.su_rTPos = 0;
+ }
+ } else {
+ this.su_rNToGo--;
+ }
+ this.su_j2 = 0;
+ this.currentState = RAND_PART_C_STATE;
+ if (this.su_rNToGo == 1) {
+ this.su_z ^= 1;
+ }
+ setupRandPartC();
+ } else {
+ this.currentState = RAND_PART_A_STATE;
+ setupRandPartA();
+ }
+ }
+
+ private void setupRandPartC() throws IOException {
+ if (this.su_j2 < this.su_z) {
+ this.currentChar = this.su_ch2;
+ this.crc.updateCRC(this.su_ch2);
+ this.su_j2++;
+ } else {
+ this.currentState = RAND_PART_A_STATE;
+ this.su_i2++;
+ this.su_count = 0;
+ setupRandPartA();
+ }
+ }
+
+ private void setupNoRandPartB() throws IOException {
+ if (this.su_ch2 != this.su_chPrev) {
+ this.su_count = 1;
+ setupNoRandPartA();
+ } else if (++this.su_count >= 4) {
+ this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff);
+ this.su_tPos = this.data.tt[this.su_tPos];
+ this.su_j2 = 0;
+ setupNoRandPartC();
+ } else {
+ setupNoRandPartA();
+ }
+ }
+
+ private void setupNoRandPartC() throws IOException {
+ if (this.su_j2 < this.su_z) {
+ int su_ch2Shadow = this.su_ch2;
+ this.currentChar = su_ch2Shadow;
+ this.crc.updateCRC(su_ch2Shadow);
+ this.su_j2++;
+ this.currentState = NO_RAND_PART_C_STATE;
+ } else {
+ this.su_i2++;
+ this.su_count = 0;
+ setupNoRandPartA();
+ }
+ }
+
+ private static final class Data {
+
+ // (with blockSize 900k)
+ final boolean[] inUse = new boolean[256]; // 256 byte
+
+ final byte[] seqToUnseq = new byte[256]; // 256 byte
+ final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte
+ final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte
+
+ /**
+ * Freq table collected to save a pass over the data during
+ * decompression.
+ */
+ final int[] unzftab = new int[256]; // 1024 byte
+
+ final int[][] limit = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
+ final int[][] base = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
+ final int[][] perm = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
+ final int[] minLens = new int[N_GROUPS]; // 24 byte
+
+ final int[] cftab = new int[257]; // 1028 byte
+ final char[] getAndMoveToFrontDecode_yy = new char[256]; // 512 byte
+ final char[][] temp_charArray2d = new char[N_GROUPS][MAX_ALPHA_SIZE]; // 3096
+ // byte
+ final byte[] recvDecodingTables_pos = new byte[N_GROUPS]; // 6 byte
+ // ---------------
+ // 60798 byte
+
+ int[] tt; // 3600000 byte
+ byte[] ll8; // 900000 byte
+
+ // ---------------
+ // 4560782 byte
+ // ===============
+
+ Data(int blockSize100k) {
+ this.ll8 = new byte[blockSize100k * BZip2Constants.BASEBLOCKSIZE];
+ }
+
+ /**
+ * Initializes the {@link #tt} array.
+ *
+ * This method is called when the required length of the array is known.
+ * I don't initialize it at construction time to avoid unnecessary
+ * memory allocation when compressing small files.
+ */
+ int[] initTT(int length) {
+ int[] ttShadow = this.tt;
+
+ // tt.length should always be >= length, but theoretically
+ // it can happen, if the compressor mixed small and large
+ // blocks. Normally only the last block will be smaller
+ // than others.
+ if ((ttShadow == null) || (ttShadow.length < length)) {
+ this.tt = ttShadow = new int[length];
+ }
+
+ return ttShadow;
+ }
+
+ }
+
+ /**
+ * Checks if the signature matches what is expected for a bzip2 file.
+ *
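+ * For example, {@code matches(new byte[] { 'B', 'Z', 'h', '9' }, 4)}
+ * returns true, since every bzip2 stream starts with the magic "BZh".
+ *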
+ * @param signature
+ * the bytes to check
+ * @param length
+ * the number of bytes to check
+ * @return true, if this stream is a bzip2 compressed stream, false otherwise
+ *
+ * @since 1.1
+ */
+ public static boolean matches(byte[] signature, int length) {
+
+ if (length < 3) {
+ return false;
+ }
+
+ if (signature[0] != 'B') {
+ return false;
+ }
+
+ if (signature[1] != 'Z') {
+ return false;
+ }
+
+ if (signature[2] != 'h') {
+ return false;
+ }
+
+ return true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorOutputStream.java
new file mode 100644
index 000000000..a5eedcf04
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorOutputStream.java
@@ -0,0 +1,1329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.bzip2;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.commons.compress.compressors.CompressorOutputStream;
+
+/**
+ * An output stream that compresses data into the BZip2 format and
+ * writes it to another stream.
+ *
+ * The compression requires large amounts of memory. Thus you should call the
+ * {@link #close() close()} method as soon as possible, to force
+ * BZip2CompressorOutputStream to release the allocated memory.
+ *
+ *
+ * the compression speed by choosing a lower blocksize, which in turn
+ * may cause a lower compression ratio. You can avoid unnecessary
+ * memory allocation by avoiding using a blocksize which is bigger
+ * than the size of the input.
+ *
+ * You can compute the memory usage for compressing by the
+ * following formula:
+ *
+ * <code>400k + (9 * blocksize)</code>
+ *
+ * To get the memory required for decompression by {@link
+ * BZip2CompressorInputStream} use
+ *
+ * <code>65k + (5 * blocksize)</code>
+ *
+ * Memory usage by blocksize:
+ *
+ * | Blocksize | Compression memory usage | Decompression memory usage |
+ * | --------- | ------------------------ | -------------------------- |
+ * | 100k      | 1300k                    | 565k                       |
+ * | 200k      | 2200k                    | 1065k                      |
+ * | 300k      | 3100k                    | 1565k                      |
+ * | 400k      | 4000k                    | 2065k                      |
+ * | 500k      | 4900k                    | 2565k                      |
+ * | 600k      | 5800k                    | 3065k                      |
+ * | 700k      | 6700k                    | 3565k                      |
+ * | 800k      | 7600k                    | 4065k                      |
+ * | 900k      | 8500k                    | 4565k                      |
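+ *
+ * For the default 900k blocksize, for example, compression needs about
+ * 400k + 9 * 900k = 8500k and decompression about 65k + 5 * 900k = 4565k,
+ * matching the last row of the table.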
+ *
+ * For decompression BZip2CompressorInputStream allocates less memory if the
+ * bzipped input is smaller than one block.
+ *
+ * Instances of this class are not threadsafe.
+ *
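+ * A minimal usage sketch (the file name and byte array below are
+ * illustrative only):
+ *
+ * <pre>
+ * OutputStream fos = new FileOutputStream("archive.bz2");
+ * BZip2CompressorOutputStream bzOut = new BZip2CompressorOutputStream(fos);
+ * bzOut.write(bytes);
+ * bzOut.close();
+ * </pre>
+ *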
+ * TODO: Update to BZip2 1.0.1
+ *
+ * @NotThreadSafe
+ */
+public class BZip2CompressorOutputStream extends CompressorOutputStream
+ implements BZip2Constants {
+
+ /**
+ * The minimum supported blocksize == 1.
+ */
+ public static final int MIN_BLOCKSIZE = 1;
+
+ /**
+ * The maximum supported blocksize == 9.
+ */
+ public static final int MAX_BLOCKSIZE = 9;
+
+ private static final int GREATER_ICOST = 15;
+ private static final int LESSER_ICOST = 0;
+
+ private static void hbMakeCodeLengths(final byte[] len, final int[] freq,
+ final Data dat, final int alphaSize,
+ final int maxLen) {
+ /*
+ * Nodes and heap entries run from 1. Entry 0 for both the heap and
+ * nodes is a sentinel.
+ */
+ final int[] heap = dat.heap;
+ final int[] weight = dat.weight;
+ final int[] parent = dat.parent;
+
+ for (int i = alphaSize; --i >= 0;) {
+ weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
+ }
+
+ for (boolean tooLong = true; tooLong;) {
+ tooLong = false;
+
+ int nNodes = alphaSize;
+ int nHeap = 0;
+ heap[0] = 0;
+ weight[0] = 0;
+ parent[0] = -2;
+
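+ // Insert all symbols into the heap, sifting each new entry up so the
+ // smallest weight ends up at heap[1] (a min-heap ordered by weight).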
+ for (int i = 1; i <= alphaSize; i++) {
+ parent[i] = -1;
+ nHeap++;
+ heap[nHeap] = i;
+
+ int zz = nHeap;
+ int tmp = heap[zz];
+ while (weight[tmp] < weight[heap[zz >> 1]]) {
+ heap[zz] = heap[zz >> 1];
+ zz >>= 1;
+ }
+ heap[zz] = tmp;
+ }
+
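+ // Standard Huffman construction: repeatedly pop the two lightest
+ // nodes and join them under a fresh parent node. The upper 24 bits
+ // of each weight hold the frequency sum, the low 8 bits the depth.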
+ while (nHeap > 1) {
+ int n1 = heap[1];
+ heap[1] = heap[nHeap];
+ nHeap--;
+
+ int yy = 0;
+ int zz = 1;
+ int tmp = heap[1];
+
+ while (true) {
+ yy = zz << 1;
+
+ if (yy > nHeap) {
+ break;
+ }
+
+ if ((yy < nHeap)
+ && (weight[heap[yy + 1]] < weight[heap[yy]])) {
+ yy++;
+ }
+
+ if (weight[tmp] < weight[heap[yy]]) {
+ break;
+ }
+
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+
+ heap[zz] = tmp;
+
+ int n2 = heap[1];
+ heap[1] = heap[nHeap];
+ nHeap--;
+
+ yy = 0;
+ zz = 1;
+ tmp = heap[1];
+
+ while (true) {
+ yy = zz << 1;
+
+ if (yy > nHeap) {
+ break;
+ }
+
+ if ((yy < nHeap)
+ && (weight[heap[yy + 1]] < weight[heap[yy]])) {
+ yy++;
+ }
+
+ if (weight[tmp] < weight[heap[yy]]) {
+ break;
+ }
+
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+
+ heap[zz] = tmp;
+ nNodes++;
+ parent[n1] = parent[n2] = nNodes;
+
+ final int weight_n1 = weight[n1];
+ final int weight_n2 = weight[n2];
+ weight[nNodes] = ((weight_n1 & 0xffffff00)
+ + (weight_n2 & 0xffffff00))
+ | (1 + (((weight_n1 & 0x000000ff)
+ > (weight_n2 & 0x000000ff))
+ ? (weight_n1 & 0x000000ff)
+ : (weight_n2 & 0x000000ff)));
+
+ parent[nNodes] = -1;
+ nHeap++;
+ heap[nHeap] = nNodes;
+
+ tmp = 0;
+ zz = nHeap;
+ tmp = heap[zz];
+ final int weight_tmp = weight[tmp];
+ while (weight_tmp < weight[heap[zz >> 1]]) {
+ heap[zz] = heap[zz >> 1];
+ zz >>= 1;
+ }
+ heap[zz] = tmp;
+
+ }
+
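+ // A symbol's code length is its depth in the tree: follow the
+ // parent links upwards and count the steps.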
+ for (int i = 1; i <= alphaSize; i++) {
+ int j = 0;
+ int k = i;
+
+ for (int parent_k; (parent_k = parent[k]) >= 0;) {
+ k = parent_k;
+ j++;
+ }
+
+ len[i - 1] = (byte) j;
+ if (j > maxLen) {
+ tooLong = true;
+ }
+ }
+
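+ // If any code exceeded maxLen, roughly halve all weights (keeping
+ // them at least 1) and build the tree again; this is what the outer
+ // tooLong loop is for.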
+ if (tooLong) {
+ for (int i = 1; i < alphaSize; i++) {
+ int j = weight[i] >> 8;
+ j = 1 + (j >> 1);
+ weight[i] = j << 8;
+ }
+ }
+ }
+ }
+
+ /**
+ * Index of the last char in the block, so the block size == last + 1.
+ */
+ private int last;
+
+ /**
+ * Always in the range 1 .. 9. The current block size is 100000 * this
+ * number.
+ */
+ private final int blockSize100k;
+
+ private int bsBuff;
+ private int bsLive;
+ private final CRC crc = new CRC();
+
+ private int nInUse;
+
+ private int nMTF;
+
+ private int currentChar = -1;
+ private int runLength = 0;
+
+ private int blockCRC;
+ private int combinedCRC;
+ private final int allowableBlockSize;
+
+ /**
+ * All memory intensive stuff.
+ */
+ private Data data;
+ private BlockSort blockSorter;
+
+ private OutputStream out;
+
+ /**
+ * Chooses a blocksize based on the given length of the data to compress.
+ *
+ * @return The blocksize, between {@link #MIN_BLOCKSIZE} and
+ * {@link #MAX_BLOCKSIZE}, both inclusive. For a non-positive
+ * inputLength this method always returns MAX_BLOCKSIZE.
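+ * For example, an inputLength of 100000 yields 1, an inputLength of
+ * 1000000 yields 8, and anything from 1056000 upwards yields 9.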
+ *
+ * @param inputLength
+ * The length of the data which will be compressed by
+ * BZip2CompressorOutputStream.
+ */
+ public static int chooseBlockSize(long inputLength) {
+ return (inputLength > 0) ? (int) Math
+ .min((inputLength / 132000) + 1, 9) : MAX_BLOCKSIZE;
+ }
+
+ /**
+ * Constructs a new BZip2CompressorOutputStream with a blocksize of 900k.
+ *
+ * @param out
+ * the destination stream.
+ *
+ * @throws IOException
+ * if an I/O error occurs in the specified stream.
+ * @throws NullPointerException
+ * if out == null.
+ */
+ public BZip2CompressorOutputStream(final OutputStream out)
+ throws IOException {
+ this(out, MAX_BLOCKSIZE);
+ }
+
+ /**
+ * Constructs a new BZip2CompressorOutputStream with specified blocksize.
+ *
+ * @param out
+ * the destination stream.
+ * @param blockSize
+ * the blockSize as 100k units.
+ *
+ * @throws IOException
+ * if an I/O error occurs in the specified stream.
+ * @throws IllegalArgumentException
+ * if (blockSize < 1) || (blockSize > 9).
+ * @throws NullPointerException
+ * if out == null.
+ *
+ * @see #MIN_BLOCKSIZE
+ * @see #MAX_BLOCKSIZE
+ */
+ public BZip2CompressorOutputStream(final OutputStream out, final int blockSize) throws IOException {
+ if (blockSize < 1) {
+ throw new IllegalArgumentException("blockSize(" + blockSize + ") < 1");
+ }
+ if (blockSize > 9) {
+ throw new IllegalArgumentException("blockSize(" + blockSize + ") > 9");
+ }
+
+ this.blockSize100k = blockSize;
+ this.out = out;
+
+ /* 20 is just a paranoia constant */
+ this.allowableBlockSize = (this.blockSize100k * BZip2Constants.BASEBLOCKSIZE) - 20;
+ init();
+ }
+
+ @Override
+ public void write(final int b) throws IOException {
+ if (this.out != null) {
+ write0(b);
+ } else {
+ throw new IOException("closed");
+ }
+ }
+
+ /**
+ * Writes the current byte to the buffer, run-length encoding it
+ * if it has been repeated at least four times (the first step
+ * RLEs sequences of four identical bytes).
+ *
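+ * For example, a run of seven identical bytes is stored as four
+ * literal bytes followed by a count byte of 3 (the run length minus
+ * 4); see the default case below.
+ *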
+ * Flushes the current block before writing data if it is
+ * full.
+ *
+ * "write to the buffer" means adding to data.buffer starting
+ * two steps "after" this.last - initially starting at index 1
+ * (not 0) - and updating this.last to point to the last index
+ * written minus 1.
+ */
+ private void writeRun() throws IOException {
+ final int lastShadow = this.last;
+
+ if (lastShadow < this.allowableBlockSize) {
+ final int currentCharShadow = this.currentChar;
+ final Data dataShadow = this.data;
+ dataShadow.inUse[currentCharShadow] = true;
+ final byte ch = (byte) currentCharShadow;
+
+ int runLengthShadow = this.runLength;
+ this.crc.updateCRC(currentCharShadow, runLengthShadow);
+
+ switch (runLengthShadow) {
+ case 1:
+ dataShadow.block[lastShadow + 2] = ch;
+ this.last = lastShadow + 1;
+ break;
+
+ case 2:
+ dataShadow.block[lastShadow + 2] = ch;
+ dataShadow.block[lastShadow + 3] = ch;
+ this.last = lastShadow + 2;
+ break;
+
+ case 3: {
+ final byte[] block = dataShadow.block;
+ block[lastShadow + 2] = ch;
+ block[lastShadow + 3] = ch;
+ block[lastShadow + 4] = ch;
+ this.last = lastShadow + 3;
+ }
+ break;
+
+ default: {
+ runLengthShadow -= 4;
+ dataShadow.inUse[runLengthShadow] = true;
+ final byte[] block = dataShadow.block;
+ block[lastShadow + 2] = ch;
+ block[lastShadow + 3] = ch;
+ block[lastShadow + 4] = ch;
+ block[lastShadow + 5] = ch;
+ block[lastShadow + 6] = (byte) runLengthShadow;
+ this.last = lastShadow + 5;
+ }
+ break;
+
+ }
+ } else {
+ endBlock();
+ initBlock();
+ writeRun();
+ }
+ }
+
+ /**
+ * Overridden to close the stream.
+ */
+ @Override
+ protected void finalize() throws Throwable {
+ finish();
+ super.finalize();
+ }
+
+
+ public void finish() throws IOException {
+ if (out != null) {
+ try {
+ if (this.runLength > 0) {
+ writeRun();
+ }
+ this.currentChar = -1;
+ endBlock();
+ endCompression();
+ } finally {
+ this.out = null;
+ this.data = null;
+ this.blockSorter = null;
+ }
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (out != null) {
+ OutputStream outShadow = this.out;
+ finish();
+ outShadow.close();
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ OutputStream outShadow = this.out;
+ if (outShadow != null) {
+ outShadow.flush();
+ }
+ }
+
+ /**
+ * Writes the magic bytes "BZ" at the start of the stream, followed
+ * by a byte indicating the file format (always 'h' for huffmanised)
+ * and a digit indicating blockSize100k.
+ * @throws IOException if the magic bytes could not be written
+ */
+ private void init() throws IOException {
+ bsPutUByte('B');
+ bsPutUByte('Z');
+
+ this.data = new Data(this.blockSize100k);
+ this.blockSorter = new BlockSort(this.data);
+
+ // huffmanised magic bytes
+ bsPutUByte('h');
+ bsPutUByte('0' + this.blockSize100k);
+
+ this.combinedCRC = 0;
+ initBlock();
+ }
+
+ private void initBlock() {
+ // blockNo++;
+ this.crc.initialiseCRC();
+ this.last = -1;
+ // ch = 0;
+
+ boolean[] inUse = this.data.inUse;
+ for (int i = 256; --i >= 0;) {
+ inUse[i] = false;
+ }
+
+ }
+
+ private void endBlock() throws IOException {
+ this.blockCRC = this.crc.getFinalCRC();
+ this.combinedCRC = (this.combinedCRC << 1) | (this.combinedCRC >>> 31);
+ this.combinedCRC ^= this.blockCRC;
+
+ // empty block at end of file
+ if (this.last == -1) {
+ return;
+ }
+
+ /* sort the block and establish posn of original string */
+ blockSort();
+
+ /*
+ * A 6-byte block header, the value chosen arbitrarily as 0x314159265359
+ * :-). A 32 bit value does not really give a strong enough guarantee
+ * that the value will not appear by chance in the compressed
+ * datastream. Worst-case probability of this event, for a 900k block,
+ * is about 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48
+ * bits. For a compressed file of size 100Gb -- about 100000 blocks --
+ * only a 48-bit marker will do. NB: normal compression/ decompression
+ * donot rely on these statistical properties. They are only important
+ * when trying to recover blocks from damaged files.
+ */
+ bsPutUByte(0x31);
+ bsPutUByte(0x41);
+ bsPutUByte(0x59);
+ bsPutUByte(0x26);
+ bsPutUByte(0x53);
+ bsPutUByte(0x59);
+
+ /* Now the block's CRC, so it is in a known place. */
+ bsPutInt(this.blockCRC);
+
+ /* Now a single bit indicating no randomisation. */
+ bsW(1, 0);
+
+ /* Finally, block's contents proper. */
+ moveToFrontCodeAndSend();
+ }
+
+ private void endCompression() throws IOException {
+ /*
+ * Now another magic 48-bit number, 0x177245385090, to indicate the end
+ * of the last block. (sqrt(pi), if you want to know. I did want to use
+ * e, but it contains too much repetition -- 27 18 28 18 28 46 -- for me
+ * to feel statistically comfortable. Call me paranoid.)
+ */
+ bsPutUByte(0x17);
+ bsPutUByte(0x72);
+ bsPutUByte(0x45);
+ bsPutUByte(0x38);
+ bsPutUByte(0x50);
+ bsPutUByte(0x90);
+
+ bsPutInt(this.combinedCRC);
+ bsFinishedWithStream();
+ }
+
+ /**
+ * Returns the blocksize parameter specified at construction time.
+ */
+ public final int getBlockSize() {
+ return this.blockSize100k;
+ }
+
+ @Override
+ public void write(final byte[] buf, int offs, final int len)
+ throws IOException {
+ if (offs < 0) {
+ throw new IndexOutOfBoundsException("offs(" + offs + ") < 0.");
+ }
+ if (len < 0) {
+ throw new IndexOutOfBoundsException("len(" + len + ") < 0.");
+ }
+ if (offs + len > buf.length) {
+ throw new IndexOutOfBoundsException("offs(" + offs + ") + len("
+ + len + ") > buf.length("
+ + buf.length + ").");
+ }
+ if (this.out == null) {
+ throw new IOException("stream closed");
+ }
+
+ for (int hi = offs + len; offs < hi;) {
+ write0(buf[offs++]);
+ }
+ }
+
+ /**
+ * Keeps track of the last bytes written and implicitly performs
+ * run-length encoding as the first step of the bzip2 algorithm.
+ */
+ private void write0(int b) throws IOException {
+ if (this.currentChar != -1) {
+ b &= 0xff;
+ if (this.currentChar == b) {
+ if (++this.runLength > 254) {
+ writeRun();
+ this.currentChar = -1;
+ this.runLength = 0;
+ }
+ // else nothing to do
+ } else {
+ writeRun();
+ this.runLength = 1;
+ this.currentChar = b;
+ }
+ } else {
+ this.currentChar = b & 0xff;
+ this.runLength++;
+ }
+ }
+
+ private static void hbAssignCodes(final int[] code, final byte[] length,
+ final int minLen, final int maxLen,
+ final int alphaSize) {
+ int vec = 0;
+ for (int n = minLen; n <= maxLen; n++) {
+ for (int i = 0; i < alphaSize; i++) {
+ if ((length[i] & 0xff) == n) {
+ code[i] = vec;
+ vec++;
+ }
+ }
+ vec <<= 1;
+ }
+ }
+
+ private void bsFinishedWithStream() throws IOException {
+ while (this.bsLive > 0) {
+ int ch = this.bsBuff >> 24;
+ this.out.write(ch); // write 8-bit
+ this.bsBuff <<= 8;
+ this.bsLive -= 8;
+ }
+ }
+
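+ /*
+ * Writes the n least significant bits of v into the bit buffer. Bits
+ * accumulate left-aligned (first written bit is the most significant)
+ * in the 32-bit bsBuff, and complete bytes are flushed from the top.
+ */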
+ private void bsW(final int n, final int v) throws IOException {
+ final OutputStream outShadow = this.out;
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24); // write 8-bit
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+
+ this.bsBuff = bsBuffShadow | (v << (32 - bsLiveShadow - n));
+ this.bsLive = bsLiveShadow + n;
+ }
+
+ private void bsPutUByte(final int c) throws IOException {
+ bsW(8, c);
+ }
+
+ private void bsPutInt(final int u) throws IOException {
+ bsW(8, (u >> 24) & 0xff);
+ bsW(8, (u >> 16) & 0xff);
+ bsW(8, (u >> 8) & 0xff);
+ bsW(8, u & 0xff);
+ }
+
+ private void sendMTFValues() throws IOException {
+ final byte[][] len = this.data.sendMTFValues_len;
+ final int alphaSize = this.nInUse + 2;
+
+ for (int t = N_GROUPS; --t >= 0;) {
+ byte[] len_t = len[t];
+ for (int v = alphaSize; --v >= 0;) {
+ len_t[v] = GREATER_ICOST;
+ }
+ }
+
+ /* Decide how many coding tables to use */
+ // assert (this.nMTF > 0) : this.nMTF;
+ final int nGroups = (this.nMTF < 200) ? 2 : (this.nMTF < 600) ? 3
+ : (this.nMTF < 1200) ? 4 : (this.nMTF < 2400) ? 5 : 6;
+
+ /* Generate an initial set of coding tables */
+ sendMTFValues0(nGroups, alphaSize);
+
+ /*
+ * Iterate up to N_ITERS times to improve the tables.
+ */
+ final int nSelectors = sendMTFValues1(nGroups, alphaSize);
+
+ /* Compute MTF values for the selectors. */
+ sendMTFValues2(nGroups, nSelectors);
+
+ /* Assign actual codes for the tables. */
+ sendMTFValues3(nGroups, alphaSize);
+
+ /* Transmit the mapping table. */
+ sendMTFValues4();
+
+ /* Now the selectors. */
+ sendMTFValues5(nGroups, nSelectors);
+
+ /* Now the coding tables. */
+ sendMTFValues6(nGroups, alphaSize);
+
+ /* And finally, the block data proper */
+ sendMTFValues7();
+ }
+
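+ /*
+ * Partitions the alphabet into nGroups contiguous ranges of roughly
+ * equal total frequency and gives each group's initial table a cheap
+ * cost (LESSER_ICOST) inside its own range and an expensive one
+ * (GREATER_ICOST) everywhere else.
+ */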
+ private void sendMTFValues0(final int nGroups, final int alphaSize) {
+ final byte[][] len = this.data.sendMTFValues_len;
+ final int[] mtfFreq = this.data.mtfFreq;
+
+ int remF = this.nMTF;
+ int gs = 0;
+
+ for (int nPart = nGroups; nPart > 0; nPart--) {
+ final int tFreq = remF / nPart;
+ int ge = gs - 1;
+ int aFreq = 0;
+
+ for (final int a = alphaSize - 1; (aFreq < tFreq) && (ge < a);) {
+ aFreq += mtfFreq[++ge];
+ }
+
+ if ((ge > gs) && (nPart != nGroups) && (nPart != 1)
+ && (((nGroups - nPart) & 1) != 0)) {
+ aFreq -= mtfFreq[ge--];
+ }
+
+ final byte[] len_np = len[nPart - 1];
+ for (int v = alphaSize; --v >= 0;) {
+ if ((v >= gs) && (v <= ge)) {
+ len_np[v] = LESSER_ICOST;
+ } else {
+ len_np[v] = GREATER_ICOST;
+ }
+ }
+
+ gs = ge + 1;
+ remF -= aFreq;
+ }
+ }
+
+ private int sendMTFValues1(final int nGroups, final int alphaSize) {
+ final Data dataShadow = this.data;
+ final int[][] rfreq = dataShadow.sendMTFValues_rfreq;
+ final int[] fave = dataShadow.sendMTFValues_fave;
+ final short[] cost = dataShadow.sendMTFValues_cost;
+ final char[] sfmap = dataShadow.sfmap;
+ final byte[] selector = dataShadow.selector;
+ final byte[][] len = dataShadow.sendMTFValues_len;
+ final byte[] len_0 = len[0];
+ final byte[] len_1 = len[1];
+ final byte[] len_2 = len[2];
+ final byte[] len_3 = len[3];
+ final byte[] len_4 = len[4];
+ final byte[] len_5 = len[5];
+ final int nMTFShadow = this.nMTF;
+
+ int nSelectors = 0;
+
+ for (int iter = 0; iter < N_ITERS; iter++) {
+ for (int t = nGroups; --t >= 0;) {
+ fave[t] = 0;
+ int[] rfreqt = rfreq[t];
+ for (int i = alphaSize; --i >= 0;) {
+ rfreqt[i] = 0;
+ }
+ }
+
+ nSelectors = 0;
+
+ for (int gs = 0; gs < this.nMTF;) {
+ /* Set group start & end marks. */
+
+ /*
+ * Calculate the cost of this group as coded by each of the
+ * coding tables.
+ */
+
+ final int ge = Math.min(gs + G_SIZE - 1, nMTFShadow - 1);
+
+ if (nGroups == N_GROUPS) {
+ // unrolled version of the else-block
+
+ short cost0 = 0;
+ short cost1 = 0;
+ short cost2 = 0;
+ short cost3 = 0;
+ short cost4 = 0;
+ short cost5 = 0;
+
+ for (int i = gs; i <= ge; i++) {
+ final int icv = sfmap[i];
+ cost0 += len_0[icv] & 0xff;
+ cost1 += len_1[icv] & 0xff;
+ cost2 += len_2[icv] & 0xff;
+ cost3 += len_3[icv] & 0xff;
+ cost4 += len_4[icv] & 0xff;
+ cost5 += len_5[icv] & 0xff;
+ }
+
+ cost[0] = cost0;
+ cost[1] = cost1;
+ cost[2] = cost2;
+ cost[3] = cost3;
+ cost[4] = cost4;
+ cost[5] = cost5;
+
+ } else {
+ for (int t = nGroups; --t >= 0;) {
+ cost[t] = 0;
+ }
+
+ for (int i = gs; i <= ge; i++) {
+ final int icv = sfmap[i];
+ for (int t = nGroups; --t >= 0;) {
+ cost[t] += len[t][icv] & 0xff;
+ }
+ }
+ }
+
+ /*
+ * Find the coding table which is best for this group, and
+ * record its identity in the selector table.
+ */
+ int bt = -1;
+ for (int t = nGroups, bc = 999999999; --t >= 0;) {
+ final int cost_t = cost[t];
+ if (cost_t < bc) {
+ bc = cost_t;
+ bt = t;
+ }
+ }
+
+ fave[bt]++;
+ selector[nSelectors] = (byte) bt;
+ nSelectors++;
+
+ /*
+ * Increment the symbol frequencies for the selected table.
+ */
+ final int[] rfreq_bt = rfreq[bt];
+ for (int i = gs; i <= ge; i++) {
+ rfreq_bt[sfmap[i]]++;
+ }
+
+ gs = ge + 1;
+ }
+
+ /*
+ * Recompute the tables based on the accumulated frequencies.
+ */
+ for (int t = 0; t < nGroups; t++) {
+ hbMakeCodeLengths(len[t], rfreq[t], this.data, alphaSize, 20);
+ }
+ }
+
+ return nSelectors;
+ }
+
+ private void sendMTFValues2(final int nGroups, final int nSelectors) {
+ // assert (nGroups < 8) : nGroups;
+
+ final Data dataShadow = this.data;
+ byte[] pos = dataShadow.sendMTFValues2_pos;
+
+ for (int i = nGroups; --i >= 0;) {
+ pos[i] = (byte) i;
+ }
+
+ for (int i = 0; i < nSelectors; i++) {
+ final byte ll_i = dataShadow.selector[i];
+ byte tmp = pos[0];
+ int j = 0;
+
+ while (ll_i != tmp) {
+ j++;
+ byte tmp2 = tmp;
+ tmp = pos[j];
+ pos[j] = tmp2;
+ }
+
+ pos[0] = tmp;
+ dataShadow.selectorMtf[i] = (byte) j;
+ }
+ }
+
+ private void sendMTFValues3(final int nGroups, final int alphaSize) {
+ int[][] code = this.data.sendMTFValues_code;
+ byte[][] len = this.data.sendMTFValues_len;
+
+ for (int t = 0; t < nGroups; t++) {
+ int minLen = 32;
+ int maxLen = 0;
+ final byte[] len_t = len[t];
+ for (int i = alphaSize; --i >= 0;) {
+ final int l = len_t[i] & 0xff;
+ if (l > maxLen) {
+ maxLen = l;
+ }
+ if (l < minLen) {
+ minLen = l;
+ }
+ }
+
+ // assert (maxLen <= 20) : maxLen;
+ // assert (minLen >= 1) : minLen;
+
+ hbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize);
+ }
+ }
+
+ private void sendMTFValues4() throws IOException {
+ final boolean[] inUse = this.data.inUse;
+ final boolean[] inUse16 = this.data.sentMTFValues4_inUse16;
+
+ for (int i = 16; --i >= 0;) {
+ inUse16[i] = false;
+ final int i16 = i * 16;
+ for (int j = 16; --j >= 0;) {
+ if (inUse[i16 + j]) {
+ inUse16[i] = true;
+ }
+ }
+ }
+
+ for (int i = 0; i < 16; i++) {
+ bsW(1, inUse16[i] ? 1 : 0);
+ }
+
+ final OutputStream outShadow = this.out;
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ for (int i = 0; i < 16; i++) {
+ if (inUse16[i]) {
+ final int i16 = i * 16;
+ for (int j = 0; j < 16; j++) {
+ // inlined: bsW(1, inUse[i16 + j] ? 1 : 0);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24); // write 8-bit
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ if (inUse[i16 + j]) {
+ bsBuffShadow |= 1 << (32 - bsLiveShadow - 1);
+ }
+ bsLiveShadow++;
+ }
+ }
+ }
+
+ this.bsBuff = bsBuffShadow;
+ this.bsLive = bsLiveShadow;
+ }
+
+ private void sendMTFValues5(final int nGroups, final int nSelectors)
+ throws IOException {
+ bsW(3, nGroups);
+ bsW(15, nSelectors);
+
+ final OutputStream outShadow = this.out;
+ final byte[] selectorMtf = this.data.selectorMtf;
+
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ for (int i = 0; i < nSelectors; i++) {
+ for (int j = 0, hj = selectorMtf[i] & 0xff; j < hj; j++) {
+ // inlined: bsW(1, 1);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24);
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ bsBuffShadow |= 1 << (32 - bsLiveShadow - 1);
+ bsLiveShadow++;
+ }
+
+ // inlined: bsW(1, 0);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24);
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ // bsBuffShadow |= 0 << (32 - bsLiveShadow - 1);
+ bsLiveShadow++;
+ }
+
+ this.bsBuff = bsBuffShadow;
+ this.bsLive = bsLiveShadow;
+ }
+
+ private void sendMTFValues6(final int nGroups, final int alphaSize)
+ throws IOException {
+ final byte[][] len = this.data.sendMTFValues_len;
+ final OutputStream outShadow = this.out;
+
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ for (int t = 0; t < nGroups; t++) {
+ byte[] len_t = len[t];
+ int curr = len_t[0] & 0xff;
+
+ // inlined: bsW(5, curr);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24); // write 8-bit
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ bsBuffShadow |= curr << (32 - bsLiveShadow - 5);
+ bsLiveShadow += 5;
+
+ for (int i = 0; i < alphaSize; i++) {
+ int lti = len_t[i] & 0xff;
+ while (curr < lti) {
+ // inlined: bsW(2, 2);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24); // write 8-bit
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ bsBuffShadow |= 2 << (32 - bsLiveShadow - 2);
+ bsLiveShadow += 2;
+
+ curr++; /* 10 */
+ }
+
+ while (curr > lti) {
+ // inlined: bsW(2, 3);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24); // write 8-bit
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ bsBuffShadow |= 3 << (32 - bsLiveShadow - 2);
+ bsLiveShadow += 2;
+
+ curr--; /* 11 */
+ }
+
+ // inlined: bsW(1, 0);
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24); // write 8-bit
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ // bsBuffShadow |= 0 << (32 - bsLiveShadow - 1);
+ bsLiveShadow++;
+ }
+ }
+
+ this.bsBuff = bsBuffShadow;
+ this.bsLive = bsLiveShadow;
+ }
+
+ private void sendMTFValues7() throws IOException {
+ final Data dataShadow = this.data;
+ final byte[][] len = dataShadow.sendMTFValues_len;
+ final int[][] code = dataShadow.sendMTFValues_code;
+ final OutputStream outShadow = this.out;
+ final byte[] selector = dataShadow.selector;
+ final char[] sfmap = dataShadow.sfmap;
+ final int nMTFShadow = this.nMTF;
+
+ int selCtr = 0;
+
+ int bsLiveShadow = this.bsLive;
+ int bsBuffShadow = this.bsBuff;
+
+ for (int gs = 0; gs < nMTFShadow;) {
+ final int ge = Math.min(gs + G_SIZE - 1, nMTFShadow - 1);
+ final int selector_selCtr = selector[selCtr] & 0xff;
+ final int[] code_selCtr = code[selector_selCtr];
+ final byte[] len_selCtr = len[selector_selCtr];
+
+ while (gs <= ge) {
+ final int sfmap_i = sfmap[gs];
+
+ //
+ // inlined: bsW(len_selCtr[sfmap_i] & 0xff,
+ // code_selCtr[sfmap_i]);
+ //
+ while (bsLiveShadow >= 8) {
+ outShadow.write(bsBuffShadow >> 24);
+ bsBuffShadow <<= 8;
+ bsLiveShadow -= 8;
+ }
+ final int n = len_selCtr[sfmap_i] & 0xFF;
+ bsBuffShadow |= code_selCtr[sfmap_i] << (32 - bsLiveShadow - n);
+ bsLiveShadow += n;
+
+ gs++;
+ }
+
+ gs = ge + 1;
+ selCtr++;
+ }
+
+ this.bsBuff = bsBuffShadow;
+ this.bsLive = bsLiveShadow;
+ }
+
+ private void moveToFrontCodeAndSend() throws IOException {
+ bsW(24, this.data.origPtr);
+ generateMTFValues();
+ sendMTFValues();
+ }
+
+ private void blockSort() {
+ blockSorter.blockSort(data, last);
+ }
+
+ /*
+ * Performs Move-To-Front on the Burrows-Wheeler transformed
+ * buffer, storing the MTFed data in data.sfmap in RUNA/RUNB
+ * run-length-encoded form.
+ *
+ * Keeps track of byte frequencies in data.mtfFreq at the same time.
+ */
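+ /*
+ * A worked check of the RUNA/RUNB scheme used below: zero-runs are
+ * written in a bijective base-2 code where, with n doubling on each
+ * symbol, RUNA contributes n and RUNB contributes 2n. A run of five
+ * zeros therefore becomes RUNA, RUNB: 1 + 2 * 2 = 5.
+ */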
+ private void generateMTFValues() {
+ final int lastShadow = this.last;
+ final Data dataShadow = this.data;
+ final boolean[] inUse = dataShadow.inUse;
+ final byte[] block = dataShadow.block;
+ final int[] fmap = dataShadow.fmap;
+ final char[] sfmap = dataShadow.sfmap;
+ final int[] mtfFreq = dataShadow.mtfFreq;
+ final byte[] unseqToSeq = dataShadow.unseqToSeq;
+ final byte[] yy = dataShadow.generateMTFValues_yy;
+
+ // make maps
+ int nInUseShadow = 0;
+ for (int i = 0; i < 256; i++) {
+ if (inUse[i]) {
+ unseqToSeq[i] = (byte) nInUseShadow;
+ nInUseShadow++;
+ }
+ }
+ this.nInUse = nInUseShadow;
+
+ final int eob = nInUseShadow + 1;
+
+ for (int i = eob; i >= 0; i--) {
+ mtfFreq[i] = 0;
+ }
+
+ for (int i = nInUseShadow; --i >= 0;) {
+ yy[i] = (byte) i;
+ }
+
+ int wr = 0;
+ int zPend = 0;
+
+ for (int i = 0; i <= lastShadow; i++) {
+ final byte ll_i = unseqToSeq[block[fmap[i]] & 0xff];
+ byte tmp = yy[0];
+ int j = 0;
+
+ while (ll_i != tmp) {
+ j++;
+ byte tmp2 = tmp;
+ tmp = yy[j];
+ yy[j] = tmp2;
+ }
+ yy[0] = tmp;
+
+ if (j == 0) {
+ zPend++;
+ } else {
+ if (zPend > 0) {
+ zPend--;
+ while (true) {
+ if ((zPend & 1) == 0) {
+ sfmap[wr] = RUNA;
+ wr++;
+ mtfFreq[RUNA]++;
+ } else {
+ sfmap[wr] = RUNB;
+ wr++;
+ mtfFreq[RUNB]++;
+ }
+
+ if (zPend >= 2) {
+ zPend = (zPend - 2) >> 1;
+ } else {
+ break;
+ }
+ }
+ zPend = 0;
+ }
+ sfmap[wr] = (char) (j + 1);
+ wr++;
+ mtfFreq[j + 1]++;
+ }
+ }
+
+ if (zPend > 0) {
+ zPend--;
+ while (true) {
+ if ((zPend & 1) == 0) {
+ sfmap[wr] = RUNA;
+ wr++;
+ mtfFreq[RUNA]++;
+ } else {
+ sfmap[wr] = RUNB;
+ wr++;
+ mtfFreq[RUNB]++;
+ }
+
+ if (zPend >= 2) {
+ zPend = (zPend - 2) >> 1;
+ } else {
+ break;
+ }
+ }
+ }
+
+ sfmap[wr] = (char) eob;
+ mtfFreq[eob]++;
+ this.nMTF = wr + 1;
+ }
+
+ static final class Data {
+
+ // with blockSize 900k
+ /* maps unsigned byte => "does it occur in block" */
+ final boolean[] inUse = new boolean[256]; // 256 byte
+ final byte[] unseqToSeq = new byte[256]; // 256 byte
+ final int[] mtfFreq = new int[MAX_ALPHA_SIZE]; // 1032 byte
+ final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte
+ final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte
+
+ final byte[] generateMTFValues_yy = new byte[256]; // 256 byte
+ final byte[][] sendMTFValues_len = new byte[N_GROUPS][MAX_ALPHA_SIZE]; // 1548
+ // byte
+ final int[][] sendMTFValues_rfreq = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192
+ // byte
+ final int[] sendMTFValues_fave = new int[N_GROUPS]; // 24 byte
+ final short[] sendMTFValues_cost = new short[N_GROUPS]; // 12 byte
+ final int[][] sendMTFValues_code = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192
+ // byte
+ final byte[] sendMTFValues2_pos = new byte[N_GROUPS]; // 6 byte
+ final boolean[] sentMTFValues4_inUse16 = new boolean[16]; // 16 byte
+
+ final int[] heap = new int[MAX_ALPHA_SIZE + 2]; // 1040 byte
+ final int[] weight = new int[MAX_ALPHA_SIZE * 2]; // 2064 byte
+ final int[] parent = new int[MAX_ALPHA_SIZE * 2]; // 2064 byte
+
+ // ------------
+ // 333408 byte
+
+ /* holds the RLEd block of original data starting at index 1.
+ * After sorting the last byte added to the buffer is at index
+ * 0. */
+ final byte[] block; // 900021 byte
+ /* maps index in Burrows-Wheeler transformed block => index of
+ * byte in original block */
+ final int[] fmap; // 3600000 byte
+ final char[] sfmap; // 3600000 byte
+ // ------------
+ // 8433529 byte
+ // ============
+
+ /**
+ * Index of original line in Burrows-Wheeler table.
+ *
+ * This is the index in fmap that points to the last byte
+ * of the original data.
+ */
+ int origPtr;
+
+ Data(int blockSize100k) {
+ final int n = blockSize100k * BZip2Constants.BASEBLOCKSIZE;
+ this.block = new byte[(n + 1 + NUM_OVERSHOOT_BYTES)];
+ this.fmap = new int[n];
+ this.sfmap = new char[2 * n];
+ }
+
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2Constants.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2Constants.java
new file mode 100644
index 000000000..9a8b9c4c0
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2Constants.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.bzip2;
+
+/**
+ * Constants for both the compress and decompress BZip2 classes.
+ */
+interface BZip2Constants {
+
+ int BASEBLOCKSIZE = 100000;
+ int MAX_ALPHA_SIZE = 258;
+ int MAX_CODE_LEN = 23;
+ int RUNA = 0;
+ int RUNB = 1;
+ int N_GROUPS = 6;
+ int G_SIZE = 50;
+ int N_ITERS = 4;
+ int MAX_SELECTORS = (2 + (900000 / G_SIZE));
+ int NUM_OVERSHOOT_BYTES = 20;
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2Utils.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2Utils.java
new file mode 100644
index 000000000..e56283512
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BZip2Utils.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.bzip2;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import org.apache.commons.compress.compressors.FileNameUtil;
+
+/**
+ * Utility code for the BZip2 compression format.
+ * @ThreadSafe
+ * @since 1.1
+ */
+public abstract class BZip2Utils {
+
+ private static final FileNameUtil fileNameUtil;
+
+ static {
+ Map<String, String> uncompressSuffix = new LinkedHashMap<String, String>();
+ // backwards compatibility: BZip2Utils never created the short
+ // tbz form, so .tar.bz2 has to be added explicitly
+ uncompressSuffix.put(".tar.bz2", ".tar");
+ uncompressSuffix.put(".tbz2", ".tar");
+ uncompressSuffix.put(".tbz", ".tar");
+ uncompressSuffix.put(".bz2", "");
+ uncompressSuffix.put(".bz", "");
+ fileNameUtil = new FileNameUtil(uncompressSuffix, ".bz2");
+ }
+
+ /** Private constructor to prevent instantiation of this utility class. */
+ private BZip2Utils() {
+ }
+
+ /**
+ * Detects common bzip2 suffixes in the given filename.
+ *
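+ * For example, "archive.tbz2", "archive.tar.bz2" and "archive.bz2"
+ * are all detected, while "archive.gz" is not.
+ *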
+ * @param filename name of a file
+ * @return {@code true} if the filename has a common bzip2 suffix,
+ * {@code false} otherwise
+ */
+ public static boolean isCompressedFilename(String filename) {
+ return fileNameUtil.isCompressedFilename(filename);
+ }
+
+ /**
+ * Maps the given name of a bzip2-compressed file to the name that the
+ * file should have after uncompression. Commonly used file type specific
+ * suffixes like ".tbz" or ".tbz2" are automatically detected and
+ * correctly mapped. For example the name "package.tbz2" is mapped to
+ * "package.tar". And any filenames with the generic ".bz2" suffix
+ * (or any other generic bzip2 suffix) is mapped to a name without that
+ * suffix. If no bzip2 suffix is detected, then the filename is returned
+ * unmapped.
+ *
+ * @param filename name of a file
+ * @return name of the corresponding uncompressed file
+ */
+ public static String getUncompressedFilename(String filename) {
+ return fileNameUtil.getUncompressedFilename(filename);
+ }
+
+ /**
+ * Maps the given filename to the name that the file should have after
+ * compression with bzip2. Currently this method simply appends the suffix
+ * ".bz2" to the filename based on the standard behaviour of the "bzip2"
+ * program, but a future version may implement a more complex mapping if
+ * a new widely used naming pattern emerges.
+ *
+ * @param filename name of a file
+ * @return name of the corresponding compressed file
+ */
+ public static String getCompressedFilename(String filename) {
+ return fileNameUtil.getCompressedFilename(filename);
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BlockSort.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BlockSort.java
new file mode 100644
index 000000000..c7c26b23d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/BlockSort.java
@@ -0,0 +1,1081 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.bzip2;
+
+import java.util.BitSet;
+
+/**
+ * Encapsulates the Burrows-Wheeler sorting algorithm needed by {@link
+ * BZip2CompressorOutputStream}.
+ *
+ * This class is based on a Java port of Julian Seward's
+ * blocksort.c in his libbzip2.
+ *
+ * The Burrows-Wheeler transform is a reversible transform of the
+ * original data that is supposed to group similar bytes close to
+ * each other. The idea is to sort all permutations of the input and
+ * only keep the last byte of each permutation. E.g. for "Commons
+ * Compress" you'd get:
+ *
+ *
+ *  CompressCommons
+ * Commons Compress
+ * CompressCommons 
+ * essCommons Compr
+ * mmons CompressCo
+ * mons CompressCom
+ * mpressCommons Co
+ * ns CompressCommo
+ * ommons CompressC
+ * ompressCommons C
+ * ons CompressComm
+ * pressCommons Com
+ * ressCommons Comp
+ * s CompressCommon
+ * sCommons Compres
+ * ssCommons Compre
+ *
+ *
+ * This results in the new text "ss romooCCmmpnse"; in addition, the
+ * index of the first line that contained the original text is kept -
+ * in this case it is 1. The idea is that in a long English text all
+ * permutations that start with "he" are likely suffixes of a "the" and
+ * thus they end in "t", leading to a larger block of "t"s that can
+ * better be compressed by the subsequent Move-to-Front, run-length
+ * and Huffman encoding steps.
+ *
+ * For more information see, for example, the literature on the
+ * Burrows-Wheeler transform.
+ *
+ * @NotThreadSafe
+ */
+class BlockSort {
+
+ /*
+ * Some of the constructs used in the C code cannot be ported
+ * literally to Java - for example macros, unsigned types. Some
+ * code has been hand-tuned to improve performance. In order to
+ * avoid memory pressure some structures are reused for several
+ * blocks and some memory is even shared between sorting and the
+ * MTF stage even though either algorithm uses it for its own
+ * purpose.
+ *
+ * Comments preserved from the actual C code are prefixed with
+ * "LBZ2:".
+ */
+
+ /*
+ * 2012-05-20 Stefan Bodewig:
+ *
+ * This class seems to mix several revisions of libbzip2's code.
+ * The mainSort function and those used by it look closer to the
+ * 0.9.5 version but show some variations introduced later. At
+ * the same time the logic of Compress 1.4 to randomize the block
+ * on bad input has been dropped after libbzip2 0.9.0 and replaced
+ * by a fallback sorting algorithm.
+ *
+ * I've added the fallbackSort function of 1.0.6 and tried to
+ * integrate it with the existing code without touching too much.
+ * I've also removed the now unused randomization code.
+ */
+
+ /*
+ * LBZ2: If you are ever unlucky/improbable enough to get a stack
+ * overflow whilst sorting, increase the following constant and
+ * try again. In practice I have never seen the stack go above 27
+ * elems, so the following limit seems very generous.
+ */
+ private static final int QSORT_STACK_SIZE = 1000;
+
+ private static final int FALLBACK_QSORT_STACK_SIZE = 100;
+
+ private static final int STACK_SIZE =
+ QSORT_STACK_SIZE < FALLBACK_QSORT_STACK_SIZE
+ ? FALLBACK_QSORT_STACK_SIZE : QSORT_STACK_SIZE;
+
+ /*
+ * Used when sorting. If too many long comparisons happen, we stop sorting,
+ * and use fallbackSort instead.
+ */
+ private int workDone;
+ private int workLimit;
+ private boolean firstAttempt;
+
+ private final int[] stack_ll = new int[STACK_SIZE]; // 4000 byte
+ private final int[] stack_hh = new int[STACK_SIZE]; // 4000 byte
+ private final int[] stack_dd = new int[QSORT_STACK_SIZE]; // 4000 byte
+
+ private final int[] mainSort_runningOrder = new int[256]; // 1024 byte
+ private final int[] mainSort_copy = new int[256]; // 1024 byte
+ private final boolean[] mainSort_bigDone = new boolean[256]; // 256 byte
+
+ private final int[] ftab = new int[65537]; // 262148 byte
+
+ /**
+ * Array instance identical to Data's sfmap; both are used only
+ * temporarily and independently, so we do not need to allocate
+ * additional memory.
+ */
+ private final char[] quadrant;
+
+ BlockSort(final BZip2CompressorOutputStream.Data data) {
+ this.quadrant = data.sfmap;
+ }
+
+ void blockSort(final BZip2CompressorOutputStream.Data data, final int last) {
+ this.workLimit = WORK_FACTOR * last;
+ this.workDone = 0;
+ this.firstAttempt = true;
+
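+ // Very small blocks go straight to the fallback sort; larger blocks
+ // try mainSort first and fall back only if it did too much work.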
+ if (last + 1 < 10000) {
+ fallbackSort(data, last);
+ } else {
+ mainSort(data, last);
+
+ if (this.firstAttempt && (this.workDone > this.workLimit)) {
+ fallbackSort(data, last);
+ }
+ }
+
+ final int[] fmap = data.fmap;
+ data.origPtr = -1;
+ for (int i = 0; i <= last; i++) {
+ if (fmap[i] == 0) {
+ data.origPtr = i;
+ break;
+ }
+ }
+
+ // assert (data.origPtr != -1) : data.origPtr;
+ }
+
+ /**
+ * Adapt fallbackSort to the expected interface of the rest of the
+ * code, in particular deal with the fact that block starts at
+ * offset 1 (in libbzip2 1.0.6 it starts at 0).
+ */
+ final void fallbackSort(final BZip2CompressorOutputStream.Data data,
+ final int last) {
+ data.block[0] = data.block[last + 1];
+ fallbackSort(data.fmap, data.block, last + 1);
+ for (int i = 0; i < last + 1; i++) {
+ --data.fmap[i];
+ }
+ for (int i = 0; i < last + 1; i++) {
+ if (data.fmap[i] == -1) {
+ data.fmap[i] = last;
+ break;
+ }
+ }
+ }
+
+/*---------------------------------------------*/
+
+/*---------------------------------------------*/
+/*--- LBZ2: Fallback O(N log(N)^2) sorting ---*/
+/*--- algorithm, for repetitive blocks ---*/
+/*---------------------------------------------*/
+
+ /*
+ * This is the fallback sorting algorithm libbzip2 1.0.6 uses for
+ * repetitive or very short inputs.
+ *
+ * The idea is inspired by Manber-Myers string suffix sorting
+ * algorithm. First a bucket sort places each permutation of the
+ * block into a bucket based on its first byte. Permutations are
+ * represented by pointers to their first character kept in
+ * (partially) sorted order inside the array ftab.
+ *
+ * The next step visits all buckets in order and performs a
+ * quicksort on all permutations of the bucket based on the index
+ * of the bucket the second byte of the permutation belongs to,
+ * thereby forming new buckets. When arrived here the
+ * permutations are sorted up to the second character and we have
+ * buckets of permutations that are identical up to two
+ * characters.
+ *
+ * Repeat the step of quicksorting each bucket, now based on the
+ * bucket holding the sequence of the third and fourth character
+ * leading to four byte buckets. Repeat this doubling of bucket
+ * sizes until all buckets only contain single permutations or the
+ * bucket size exceeds the block size.
+ *
+ * I.e.
+ *
+ * "abraba" form three buckets for the chars "a", "b", and "r" in
+ * the first step with
+ *
+ * fmap = { 'a:' 5, 3, 0, 'b:' 4, 1, 'r:' 2 }
+ *
+ * when looking at the bucket of "a"s the second characters are in
+ * the buckets that start with fmap-index 0 (rolled over), 3 and 3
+ * respectively, forming two new buckets "aa" and "ab", so we get
+ *
+ * fmap = { 'aa:' 5, 'ab:' 3, 0, 'ba:' 4, 'br': 1, 'ra:' 2 }
+ *
+ * since the last bucket only contained a single item it didn't
+ * have to be sorted at all.
+ *
+ * There now is just one bucket with more than one permutation
+ * that remains to be sorted. For the permutation that starts
+ * with index 3 the third and fourth char are in bucket 'aa' at
+ * index 0 and for the one starting at block index 0 they are in
+ * bucket 'ra' with sort index 5. The fully sorted order then becomes:
+ *
+ * fmap = { 5, 3, 0, 4, 1, 2 }
+ *
+ */
+
+ /**
+ * @param fmap points to the index of the starting point of a
+ * permutation inside the block of data in the current
+ * partially sorted order
+ * @param eclass points from the index of a character inside the
+ * block to the first index in fmap that contains the
+ * bucket of its suffix that is sorted in this step.
+ * @param lo lower boundary of the fmap-interval to be sorted
+ * @param hi upper boundary of the fmap-interval to be sorted
+ */
+ private void fallbackSimpleSort(int[] fmap,
+ int[] eclass,
+ int lo,
+ int hi) {
+ if (lo == hi) {
+ return;
+ }
+
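+ // Two insertion-sort passes: a coarse one with stride 4 followed by
+ // a plain stride-1 pass, i.e. a tiny Shell sort with increments {4, 1}.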
+ int j;
+ if (hi - lo > 3) {
+ for (int i = hi - 4; i >= lo; i--) {
+ int tmp = fmap[i];
+ int ec_tmp = eclass[tmp];
+ for (j = i + 4; j <= hi && ec_tmp > eclass[fmap[j]];
+ j += 4) {
+ fmap[j - 4] = fmap[j];
+ }
+ fmap[j - 4] = tmp;
+ }
+ }
+
+ for (int i = hi - 1; i >= lo; i--) {
+ int tmp = fmap[i];
+ int ec_tmp = eclass[tmp];
+ for (j = i + 1; j <= hi && ec_tmp > eclass[fmap[j]]; j++) {
+ fmap[j - 1] = fmap[j];
+ }
+ fmap[j-1] = tmp;
+ }
+ }
+
+ private static final int FALLBACK_QSORT_SMALL_THRESH = 10;
+
+ /**
+ * swaps two values in fmap
+ */
+ private void fswap(int[] fmap, int zz1, int zz2) {
+ int zztmp = fmap[zz1];
+ fmap[zz1] = fmap[zz2];
+ fmap[zz2] = zztmp;
+ }
+
+ /**
+ * swaps two intervals starting at yyp1 and yyp2 of length yyn inside fmap.
+ */
+ private void fvswap(int[] fmap, int yyp1, int yyp2, int yyn) {
+ while (yyn > 0) {
+ fswap(fmap, yyp1, yyp2);
+ yyp1++; yyp2++; yyn--;
+ }
+ }
+
+ private int fmin(int a, int b) {
+ return a < b ? a : b;
+ }
+
+ private void fpush(int sp, int lz, int hz) {
+ stack_ll[sp] = lz;
+ stack_hh[sp] = hz;
+ }
+
+ private int[] fpop(int sp) {
+ return new int[] { stack_ll[sp], stack_hh[sp] };
+ }
+
+ /**
+ * @param fmap points to the index of the starting point of a
+ * permutation inside the block of data in the current
+ * partially sorted order
+ * @param eclass points from the index of a character inside the
+ * block to the first index in fmap that contains the
+ * bucket of its suffix that is sorted in this step.
+ * @param loSt lower boundary of the fmap-interval to be sorted
+ * @param hiSt upper boundary of the fmap-interval to be sorted
+ */
+ private void fallbackQSort3(int[] fmap,
+ int[] eclass,
+ int loSt,
+ int hiSt) {
+ int lo, unLo, ltLo, hi, unHi, gtHi, n;
+
+ long r = 0;
+ int sp = 0;
+ fpush(sp++, loSt, hiSt);
+
+ while (sp > 0) {
+ int[] s = fpop(--sp);
+ lo = s[0]; hi = s[1];
+
+ if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) {
+ fallbackSimpleSort(fmap, eclass, lo, hi);
+ continue;
+ }
+
+ /* LBZ2: Random partitioning. Median of 3 sometimes fails to
+ avoid bad cases. Median of 9 seems to help but
+ looks rather expensive. This too seems to work but
+ is cheaper. Guidance for the magic constants
+ 7621 and 32768 is taken from Sedgewick's algorithms
+ book, chapter 35.
+ */
+ r = ((r * 7621) + 1) % 32768;
+ long r3 = r % 3, med;
+ if (r3 == 0) {
+ med = eclass[fmap[lo]];
+ } else if (r3 == 1) {
+ med = eclass[fmap[(lo + hi) >>> 1]];
+ } else {
+ med = eclass[fmap[hi]];
+ }
+
+ unLo = ltLo = lo;
+ unHi = gtHi = hi;
+
+ // looks like the ternary partition attributed to Wegner
+ // in the cited Sedgewick paper
+ while (true) {
+ while (true) {
+ if (unLo > unHi) {
+ break;
+ }
+ n = eclass[fmap[unLo]] - (int) med;
+ if (n == 0) {
+ fswap(fmap, unLo, ltLo);
+ ltLo++; unLo++;
+ continue;
+ }
+ if (n > 0) {
+ break;
+ }
+ unLo++;
+ }
+ while (true) {
+ if (unLo > unHi) {
+ break;
+ }
+ n = eclass[fmap[unHi]] - (int) med;
+ if (n == 0) {
+ fswap(fmap, unHi, gtHi);
+ gtHi--; unHi--;
+ continue;
+ }
+ if (n < 0) {
+ break;
+ }
+ unHi--;
+ }
+ if (unLo > unHi) {
+ break;
+ }
+ fswap(fmap, unLo, unHi); unLo++; unHi--;
+ }
+
+ if (gtHi < ltLo) {
+ continue;
+ }
+
+ n = fmin(ltLo - lo, unLo - ltLo);
+ fvswap(fmap, lo, unLo - n, n);
+ int m = fmin(hi - gtHi, gtHi - unHi);
+ fvswap(fmap, unHi + 1, hi - m + 1, m);
+
+ n = lo + unLo - ltLo - 1;
+ m = hi - (gtHi - unHi) + 1;
+
+ if (n - lo > hi - m) {
+ fpush(sp++, lo, n);
+ fpush(sp++, m, hi);
+ } else {
+ fpush(sp++, m, hi);
+ fpush(sp++, lo, n);
+ }
+ }
+ }
+
+
+/*---------------------------------------------*/
+
+ private int[] eclass;
+
+ private int[] getEclass() {
+ return eclass == null
+ ? (eclass = new int[quadrant.length / 2]) : eclass;
+ }
+
+ /*
+ * The C code uses an array of ints (each int holding 32 flags) to
+ * represent the bucket-start flags (bhtab). It also contains
+ * optimizations to skip over 32 consecutively set or
+ * consecutively unset bits on word boundaries at once. For now
+ * I've chosen to use the simpler but potentially slower code
+ * using BitSet - also in the hope that using the BitSet#nextXXX
+ * methods may be fast enough.
+ */
+
+ /**
+ * @param fmap points to the index of the starting point of a
+ * permutation inside the block of data in the current
+ * partially sorted order
+ * @param block the original data
+ * @param nblock size of the block
+ */
+ final void fallbackSort(int[] fmap, byte[] block, int nblock) {
+ final int[] ftab = new int[257];
+ int H, i, j, k, l, r, cc, cc1;
+ int nNotDone;
+ int nBhtab;
+ final int[] eclass = getEclass();
+
+ for (i = 0; i < nblock; i++) {
+ eclass[i] = 0;
+ }
+ /*--
+ LBZ2: Initial 1-char radix sort to generate
+ initial fmap and initial BH bits.
+ --*/
+ for (i = 0; i < nblock; i++) {
+ ftab[block[i] & 0xff]++;
+ }
+ for (i = 1; i < 257; i++) {
+ ftab[i] += ftab[i - 1];
+ }
+
+ for (i = 0; i < nblock; i++) {
+ j = block[i] & 0xff;
+ k = ftab[j] - 1;
+ ftab[j] = k;
+ fmap[k] = i;
+ }
+
+ nBhtab = 64 + nblock;
+ BitSet bhtab = new BitSet(nBhtab);
+ for (i = 0; i < 256; i++) {
+ bhtab.set(ftab[i]);
+ }
+
+ /*--
+ LBZ2: Inductively refine the buckets. Kind-of an
+ "exponential radix sort" (!), inspired by the
+ Manber-Myers suffix array construction algorithm.
+ --*/
+
+ /*-- LBZ2: set sentinel bits for block-end detection --*/
+ for (i = 0; i < 32; i++) {
+ bhtab.set(nblock + 2 * i);
+ bhtab.clear(nblock + 2 * i + 1);
+ }
+
+ /*-- LBZ2: the log(N) loop --*/
+ H = 1;
+ while (true) {
+
+ j = 0;
+ for (i = 0; i < nblock; i++) {
+ if (bhtab.get(i)) {
+ j = i;
+ }
+ k = fmap[i] - H;
+ if (k < 0) {
+ k += nblock;
+ }
+ eclass[k] = j;
+ }
+
+ nNotDone = 0;
+ r = -1;
+ while (true) {
+
+ /*-- LBZ2: find the next non-singleton bucket --*/
+ k = r + 1;
+ k = bhtab.nextClearBit(k);
+ l = k - 1;
+ if (l >= nblock) {
+ break;
+ }
+ k = bhtab.nextSetBit(k + 1);
+ r = k - 1;
+ if (r >= nblock) {
+ break;
+ }
+
+ /*-- LBZ2: now [l, r] bracket current bucket --*/
+ if (r > l) {
+ nNotDone += (r - l + 1);
+ fallbackQSort3(fmap, eclass, l, r);
+
+ /*-- LBZ2: scan bucket and generate header bits-- */
+ cc = -1;
+ for (i = l; i <= r; i++) {
+ cc1 = eclass[fmap[i]];
+ if (cc != cc1) {
+ bhtab.set(i);
+ cc = cc1;
+ }
+ }
+ }
+ }
+
+ H *= 2;
+ if (H > nblock || nNotDone == 0) {
+ break;
+ }
+ }
+ }
+
+/*---------------------------------------------*/
+
+ /*
+ * LBZ2: Knuth's increments seem to work better than Incerpi-Sedgewick here.
+ * Possibly because the number of elems to sort is usually small, typically
+ * <= 20.
+ */
+ private static final int[] INCS = { 1, 4, 13, 40, 121, 364, 1093, 3280,
+ 9841, 29524, 88573, 265720, 797161,
+ 2391484 };
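+
+ /*
+ * Editorial sketch (not part of the original sources): mainSimpleSort
+ * below is an unrolled Shell sort over fmap using the increments above
+ * (Knuth's sequence, h = 3 * h + 1). The plain form it implements is
+ * roughly:
+ *
+ * for (int hp = ...; --hp >= 0;) {
+ * int h = INCS[hp];
+ * for (int i = lo + h; i <= hi; i++) {
+ * int v = fmap[i];
+ * int j = i;
+ * while (j - h >= lo && suffixGreater(fmap[j - h] + d, v + d)) {
+ * fmap[j] = fmap[j - h];
+ * j -= h;
+ * }
+ * fmap[j] = v;
+ * }
+ * }
+ *
+ * where suffixGreater stands for the inlined "HAMMER" comparison
+ * (mainGtU in the C sources): compare block bytes first and use the
+ * quadrant values as tie-breakers.
+ */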
+
+ /**
+ * This is the most hammered method of this class.
+ *
+ * This is the version using unrolled loops. Normally I never use such ones
+ * in Java code. The unrolling has shown a noticeable performance improvement
+ * on JRE 1.4.2 (Linux i586 / HotSpot Client). Of course it depends on the
+ * JIT compiler of the VM.
+ *
+ */
+ private boolean mainSimpleSort(final BZip2CompressorOutputStream.Data dataShadow,
+ final int lo, final int hi, final int d,
+ final int lastShadow) {
+ final int bigN = hi - lo + 1;
+ if (bigN < 2) {
+ return this.firstAttempt && (this.workDone > this.workLimit);
+ }
+
+ int hp = 0;
+ while (INCS[hp] < bigN) {
+ hp++;
+ }
+
+ final int[] fmap = dataShadow.fmap;
+ final char[] quadrant = this.quadrant;
+ final byte[] block = dataShadow.block;
+ final int lastPlus1 = lastShadow + 1;
+ final boolean firstAttemptShadow = this.firstAttempt;
+ final int workLimitShadow = this.workLimit;
+ int workDoneShadow = this.workDone;
+
+ // Following block contains unrolled code which could be shortened by
+ // coding it in additional loops.
+
+ HP: while (--hp >= 0) {
+ final int h = INCS[hp];
+ final int mj = lo + h - 1;
+
+ for (int i = lo + h; i <= hi;) {
+ // copy
+ for (int k = 3; (i <= hi) && (--k >= 0); i++) {
+ final int v = fmap[i];
+ final int vd = v + d;
+ int j = i;
+
+ // for (int a;
+ // (j > mj) && mainGtU((a = fmap[j - h]) + d, vd,
+ // block, quadrant, lastShadow);
+ // j -= h) {
+ // fmap[j] = a;
+ // }
+ //
+ // unrolled version:
+
+ // start inline mainGTU
+ boolean onceRunned = false;
+ int a = 0;
+
+ HAMMER: while (true) {
+ if (onceRunned) {
+ fmap[j] = a;
+ if ((j -= h) <= mj) {
+ break HAMMER;
+ }
+ } else {
+ onceRunned = true;
+ }
+
+ a = fmap[j - h];
+ int i1 = a + d;
+ int i2 = vd;
+
+ // following could be done in a loop, but
+ // unrolled it for performance:
+ if (block[i1 + 1] == block[i2 + 1]) {
+ if (block[i1 + 2] == block[i2 + 2]) {
+ if (block[i1 + 3] == block[i2 + 3]) {
+ if (block[i1 + 4] == block[i2 + 4]) {
+ if (block[i1 + 5] == block[i2 + 5]) {
+ if (block[(i1 += 6)] == block[(i2 += 6)]) {
+ int x = lastShadow;
+ X: while (x > 0) {
+ x -= 4;
+
+ if (block[i1 + 1] == block[i2 + 1]) {
+ if (quadrant[i1] == quadrant[i2]) {
+ if (block[i1 + 2] == block[i2 + 2]) {
+ if (quadrant[i1 + 1] == quadrant[i2 + 1]) {
+ if (block[i1 + 3] == block[i2 + 3]) {
+ if (quadrant[i1 + 2] == quadrant[i2 + 2]) {
+ if (block[i1 + 4] == block[i2 + 4]) {
+ if (quadrant[i1 + 3] == quadrant[i2 + 3]) {
+ if ((i1 += 4) >= lastPlus1) {
+ i1 -= lastPlus1;
+ }
+ if ((i2 += 4) >= lastPlus1) {
+ i2 -= lastPlus1;
+ }
+ workDoneShadow++;
+ continue X;
+ } else if ((quadrant[i1 + 3] > quadrant[i2 + 3])) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((quadrant[i1 + 2] > quadrant[i2 + 2])) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((quadrant[i1 + 1] > quadrant[i2 + 1])) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((quadrant[i1] > quadrant[i2])) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+
+ }
+ break HAMMER;
+ } // while x > 0
+ else {
+ if ((block[i1] & 0xff) > (block[i2] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ }
+ } else if ((block[i1 + 5] & 0xff) > (block[i2 + 5] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+ } else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
+ continue HAMMER;
+ } else {
+ break HAMMER;
+ }
+
+ } // HAMMER
+ // end inline mainGTU
+
+ fmap[j] = v;
+ }
+
+ if (firstAttemptShadow && (i <= hi)
+ && (workDoneShadow > workLimitShadow)) {
+ break HP;
+ }
+ }
+ }
+
+ this.workDone = workDoneShadow;
+ return firstAttemptShadow && (workDoneShadow > workLimitShadow);
+ }
+
+/*--
+ LBZ2: The following is an implementation of
+ an elegant 3-way quicksort for strings,
+ described in a paper "Fast Algorithms for
+ Sorting and Searching Strings", by Robert
+ Sedgewick and Jon L. Bentley.
+--*/
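+
+ /*
+ * Editorial note (not part of the original sources): mainQSort3 below
+ * follows the Bentley/Sedgewick scheme cited above, partitioning on the
+ * byte at depth d + 1 with the four-pointer invariant
+ *
+ * [lo, ltLo) equal to the pivot (parked at the left edge)
+ * [ltLo, unLo) less than the pivot
+ * [unLo, unHi] not yet examined
+ * (unHi, gtHi] greater than the pivot
+ * (gtHi, hi] equal to the pivot (parked at the right edge)
+ *
+ * after which the vswap calls move the equal keys from both edges into
+ * the middle, so only the strictly-less and strictly-greater ranges are
+ * re-sorted at depth d, and the equal range at depth d + 1.
+ */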
+
+ private static void vswap(int[] fmap, int p1, int p2, int n) {
+ n += p1;
+ while (p1 < n) {
+ int t = fmap[p1];
+ fmap[p1++] = fmap[p2];
+ fmap[p2++] = t;
+ }
+ }
+
+ private static byte med3(byte a, byte b, byte c) {
+ return (a < b) ? (b < c ? b : a < c ? c : a) : (b > c ? b : a > c ? c
+ : a);
+ }
+
+ private static final int SMALL_THRESH = 20;
+ private static final int DEPTH_THRESH = 10;
+ private static final int WORK_FACTOR = 30;
+
+ /**
+ * Method "mainQSort3", file "blocksort.c", BZip2 1.0.2
+ */
+ private void mainQSort3(final BZip2CompressorOutputStream.Data dataShadow,
+ final int loSt, final int hiSt, final int dSt,
+ final int last) {
+ final int[] stack_ll = this.stack_ll;
+ final int[] stack_hh = this.stack_hh;
+ final int[] stack_dd = this.stack_dd;
+ final int[] fmap = dataShadow.fmap;
+ final byte[] block = dataShadow.block;
+
+ stack_ll[0] = loSt;
+ stack_hh[0] = hiSt;
+ stack_dd[0] = dSt;
+
+ for (int sp = 1; --sp >= 0;) {
+ final int lo = stack_ll[sp];
+ final int hi = stack_hh[sp];
+ final int d = stack_dd[sp];
+
+ if ((hi - lo < SMALL_THRESH) || (d > DEPTH_THRESH)) {
+ if (mainSimpleSort(dataShadow, lo, hi, d, last)) {
+ return;
+ }
+ } else {
+ final int d1 = d + 1;
+ final int med = med3(block[fmap[lo] + d1],
+ block[fmap[hi] + d1], block[fmap[(lo + hi) >>> 1] + d1]) & 0xff;
+
+ int unLo = lo;
+ int unHi = hi;
+ int ltLo = lo;
+ int gtHi = hi;
+
+ while (true) {
+ while (unLo <= unHi) {
+ final int n = (block[fmap[unLo] + d1] & 0xff)
+ - med;
+ if (n == 0) {
+ final int temp = fmap[unLo];
+ fmap[unLo++] = fmap[ltLo];
+ fmap[ltLo++] = temp;
+ } else if (n < 0) {
+ unLo++;
+ } else {
+ break;
+ }
+ }
+
+ while (unLo <= unHi) {
+ final int n = (block[fmap[unHi] + d1] & 0xff)
+ - med;
+ if (n == 0) {
+ final int temp = fmap[unHi];
+ fmap[unHi--] = fmap[gtHi];
+ fmap[gtHi--] = temp;
+ } else if (n > 0) {
+ unHi--;
+ } else {
+ break;
+ }
+ }
+
+ if (unLo <= unHi) {
+ final int temp = fmap[unLo];
+ fmap[unLo++] = fmap[unHi];
+ fmap[unHi--] = temp;
+ } else {
+ break;
+ }
+ }
+
+ if (gtHi < ltLo) {
+ stack_ll[sp] = lo;
+ stack_hh[sp] = hi;
+ stack_dd[sp] = d1;
+ sp++;
+ } else {
+ int n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo)
+ : (unLo - ltLo);
+ vswap(fmap, lo, unLo - n, n);
+ int m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi)
+ : (gtHi - unHi);
+ vswap(fmap, unLo, hi - m + 1, m);
+
+ n = lo + unLo - ltLo - 1;
+ m = hi - (gtHi - unHi) + 1;
+
+ stack_ll[sp] = lo;
+ stack_hh[sp] = n;
+ stack_dd[sp] = d;
+ sp++;
+
+ stack_ll[sp] = n + 1;
+ stack_hh[sp] = m - 1;
+ stack_dd[sp] = d1;
+ sp++;
+
+ stack_ll[sp] = m;
+ stack_hh[sp] = hi;
+ stack_dd[sp] = d;
+ sp++;
+ }
+ }
+ }
+ }
+
+ private static final int SETMASK = (1 << 21);
+ private static final int CLEARMASK = (~SETMASK);
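+
+ /*
+ * Editorial note (not part of the original sources): ftab entries are
+ * offsets into the block, and the maximum bzip2 block size (900000
+ * bytes) fits below bit 21. mainSort therefore borrows bit 21 of each
+ * entry as a "this small bucket is already sorted" flag: OR-ing in
+ * SETMASK tags a bucket, AND-ing with CLEARMASK recovers the offset.
+ */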
+
+ final void mainSort(final BZip2CompressorOutputStream.Data dataShadow,
+ final int lastShadow) {
+ final int[] runningOrder = this.mainSort_runningOrder;
+ final int[] copy = this.mainSort_copy;
+ final boolean[] bigDone = this.mainSort_bigDone;
+ final int[] ftab = this.ftab;
+ final byte[] block = dataShadow.block;
+ final int[] fmap = dataShadow.fmap;
+ final char[] quadrant = this.quadrant;
+ final int workLimitShadow = this.workLimit;
+ final boolean firstAttemptShadow = this.firstAttempt;
+
+ // LBZ2: Set up the 2-byte frequency table
+ for (int i = 65537; --i >= 0;) {
+ ftab[i] = 0;
+ }
+
+ /*
+ * In the various block-sized structures, live data runs from 0 to
+ * last+NUM_OVERSHOOT_BYTES inclusive. First, set up the overshoot area
+ * for block.
+ */
+ for (int i = 0; i < BZip2Constants.NUM_OVERSHOOT_BYTES; i++) {
+ block[lastShadow + i + 2] = block[(i % (lastShadow + 1)) + 1];
+ }
+ for (int i = lastShadow + BZip2Constants.NUM_OVERSHOOT_BYTES +1; --i >= 0;) {
+ quadrant[i] = 0;
+ }
+ block[0] = block[lastShadow + 1];
+
+ // LBZ2: Complete the initial radix sort:
+
+ int c1 = block[0] & 0xff;
+ for (int i = 0; i <= lastShadow; i++) {
+ final int c2 = block[i + 1] & 0xff;
+ ftab[(c1 << 8) + c2]++;
+ c1 = c2;
+ }
+
+ for (int i = 1; i <= 65536; i++) {
+ ftab[i] += ftab[i - 1];
+ }
+
+ c1 = block[1] & 0xff;
+ for (int i = 0; i < lastShadow; i++) {
+ final int c2 = block[i + 2] & 0xff;
+ fmap[--ftab[(c1 << 8) + c2]] = i;
+ c1 = c2;
+ }
+
+ fmap[--ftab[((block[lastShadow + 1] & 0xff) << 8) + (block[1] & 0xff)]] = lastShadow;
+
+ /*
+ * LBZ2: Now ftab contains the first loc of every small bucket. Calculate the
+ * running order, from smallest to largest big bucket.
+ */
+ for (int i = 256; --i >= 0;) {
+ bigDone[i] = false;
+ runningOrder[i] = i;
+ }
+
+ for (int h = 364; h != 1;) {
+ h /= 3;
+ for (int i = h; i <= 255; i++) {
+ final int vv = runningOrder[i];
+ final int a = ftab[(vv + 1) << 8] - ftab[vv << 8];
+ final int b = h - 1;
+ int j = i;
+ for (int ro = runningOrder[j - h]; (ftab[(ro + 1) << 8] - ftab[ro << 8]) > a; ro = runningOrder[j
+ - h]) {
+ runningOrder[j] = ro;
+ j -= h;
+ if (j <= b) {
+ break;
+ }
+ }
+ runningOrder[j] = vv;
+ }
+ }
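+
+ /*
+ * Editorial note (not part of the original sources): the h-loop above is
+ * a Shell sort of runningOrder (increments 121, 40, 13, 4, 1) keyed on
+ * big-bucket size, ftab[(vv + 1) << 8] - ftab[vv << 8], so that the main
+ * loop below visits the least full buckets first.
+ */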
+
+ /*
+ * LBZ2: The main sorting loop.
+ */
+ for (int i = 0; i <= 255; i++) {
+ /*
+ * LBZ2: Process big buckets, starting with the least full.
+ */
+ final int ss = runningOrder[i];
+
+ // Step 1:
+ /*
+ * LBZ2: Complete the big bucket [ss] by quicksorting any unsorted small
+ * buckets [ss, j]. Hopefully previous pointer-scanning phases have
+ * already completed many of the small buckets [ss, j], so we don't
+ * have to sort them at all.
+ */
+ for (int j = 0; j <= 255; j++) {
+ final int sb = (ss << 8) + j;
+ final int ftab_sb = ftab[sb];
+ if ((ftab_sb & SETMASK) != SETMASK) {
+ final int lo = ftab_sb & CLEARMASK;
+ final int hi = (ftab[sb + 1] & CLEARMASK) - 1;
+ if (hi > lo) {
+ mainQSort3(dataShadow, lo, hi, 2, lastShadow);
+ if (firstAttemptShadow
+ && (this.workDone > workLimitShadow)) {
+ return;
+ }
+ }
+ ftab[sb] = ftab_sb | SETMASK;
+ }
+ }
+
+ // Step 2:
+ // LBZ2: Now scan this big bucket so as to synthesise the
+ // sorted order for small buckets [t, ss] for all t != ss.
+
+ for (int j = 0; j <= 255; j++) {
+ copy[j] = ftab[(j << 8) + ss] & CLEARMASK;
+ }
+
+ for (int j = ftab[ss << 8] & CLEARMASK, hj = (ftab[(ss + 1) << 8] & CLEARMASK); j < hj; j++) {
+ final int fmap_j = fmap[j];
+ c1 = block[fmap_j] & 0xff;
+ if (!bigDone[c1]) {
+ fmap[copy[c1]] = (fmap_j == 0) ? lastShadow : (fmap_j - 1);
+ copy[c1]++;
+ }
+ }
+
+ for (int j = 256; --j >= 0;) {
+ ftab[(j << 8) + ss] |= SETMASK;
+ }
+
+ // Step 3:
+ /*
+ * LBZ2: The ss big bucket is now done. Record this fact, and update the
+ * quadrant descriptors. Remember to update quadrants in the
+ * overshoot area too, if necessary. The "if (i < 255)" test merely
+ * skips this updating for the last bucket processed, since updating
+ * for the last bucket is pointless.
+ */
+ bigDone[ss] = true;
+
+ if (i < 255) {
+ final int bbStart = ftab[ss << 8] & CLEARMASK;
+ final int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart;
+ int shifts = 0;
+
+ while ((bbSize >> shifts) > 65534) {
+ shifts++;
+ }
+
+ for (int j = 0; j < bbSize; j++) {
+ final int a2update = fmap[bbStart + j];
+ final char qVal = (char) (j >> shifts);
+ quadrant[a2update] = qVal;
+ if (a2update < BZip2Constants.NUM_OVERSHOOT_BYTES) {
+ quadrant[a2update + lastShadow + 1] = qVal;
+ }
+ }
+ }
+
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/CRC.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/CRC.java
new file mode 100644
index 000000000..ec0502b5d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/CRC.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.bzip2;
+
+/**
+ * A simple class to hold and calculate the CRC for sanity checking of the
+ * data.
+ * @NotThreadSafe
+ */
+class CRC {
+ private static final int crc32Table[] = {
+ 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
+ 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
+ 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+ 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
+ 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
+ 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+ 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
+ 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
+ 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+ 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
+ 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
+ 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+ 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
+ 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
+ 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+ 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
+ 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
+ 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+ 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
+ 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
+ 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+ 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
+ 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
+ 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+ 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
+ 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
+ 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+ 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
+ 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
+ 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+ 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
+ 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
+ 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+ 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
+ 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
+ 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+ 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
+ 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
+ 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
+ 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
+ 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
+ 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+ 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
+ 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
+ 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+ 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
+ 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
+ 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+ 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
+ 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
+ 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+ 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
+ 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
+ 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
+ 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
+ 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
+ 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+ 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
+ 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
+ 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+ 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
+ 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
+ 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+ 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+ };
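+
+ /*
+ * Editorial sketch (not part of the original sources): the table above
+ * is the MSB-first CRC-32 table for the generator polynomial 0x04c11db7
+ * (the same polynomial as java.util.zip.CRC32, which processes bits in
+ * reflected order instead). It could be generated with:
+ *
+ * static int[] makeCrc32Table() {
+ * int[] table = new int[256];
+ * for (int i = 0; i < 256; i++) {
+ * int c = i << 24;
+ * for (int k = 0; k < 8; k++) {
+ * c = (c & 0x80000000) != 0 ? (c << 1) ^ 0x04c11db7 : c << 1;
+ * }
+ * table[i] = c;
+ * }
+ * return table;
+ * }
+ */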
+
+ CRC() {
+ initialiseCRC();
+ }
+
+ void initialiseCRC() {
+ globalCrc = 0xffffffff;
+ }
+
+ int getFinalCRC() {
+ return ~globalCrc;
+ }
+
+ int getGlobalCRC() {
+ return globalCrc;
+ }
+
+ void setGlobalCRC(int newCrc) {
+ globalCrc = newCrc;
+ }
+
+ void updateCRC(int inCh) {
+ int temp = (globalCrc >> 24) ^ inCh;
+ if (temp < 0) {
+ temp = 256 + temp;
+ }
+ globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp];
+ }
+
+ void updateCRC(int inCh, int repeat) {
+ int globalCrcShadow = this.globalCrc;
+ while (repeat-- > 0) {
+ int temp = (globalCrcShadow >> 24) ^ inCh;
+ globalCrcShadow = (globalCrcShadow << 8) ^ crc32Table[(temp >= 0)
+ ? temp
+ : (temp + 256)];
+ }
+ this.globalCrc = globalCrcShadow;
+ }
+
+ private int globalCrc;
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/Rand.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/Rand.java
new file mode 100644
index 000000000..0c08d1f7e
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/Rand.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.bzip2;
+
+/**
+ * Random numbers for both the compress and decompress BZip2 classes.
+ */
+final class Rand {
+
+ private static final int[] RNUMS = {
+ 619, 720, 127, 481, 931, 816, 813, 233, 566, 247,
+ 985, 724, 205, 454, 863, 491, 741, 242, 949, 214,
+ 733, 859, 335, 708, 621, 574, 73, 654, 730, 472,
+ 419, 436, 278, 496, 867, 210, 399, 680, 480, 51,
+ 878, 465, 811, 169, 869, 675, 611, 697, 867, 561,
+ 862, 687, 507, 283, 482, 129, 807, 591, 733, 623,
+ 150, 238, 59, 379, 684, 877, 625, 169, 643, 105,
+ 170, 607, 520, 932, 727, 476, 693, 425, 174, 647,
+ 73, 122, 335, 530, 442, 853, 695, 249, 445, 515,
+ 909, 545, 703, 919, 874, 474, 882, 500, 594, 612,
+ 641, 801, 220, 162, 819, 984, 589, 513, 495, 799,
+ 161, 604, 958, 533, 221, 400, 386, 867, 600, 782,
+ 382, 596, 414, 171, 516, 375, 682, 485, 911, 276,
+ 98, 553, 163, 354, 666, 933, 424, 341, 533, 870,
+ 227, 730, 475, 186, 263, 647, 537, 686, 600, 224,
+ 469, 68, 770, 919, 190, 373, 294, 822, 808, 206,
+ 184, 943, 795, 384, 383, 461, 404, 758, 839, 887,
+ 715, 67, 618, 276, 204, 918, 873, 777, 604, 560,
+ 951, 160, 578, 722, 79, 804, 96, 409, 713, 940,
+ 652, 934, 970, 447, 318, 353, 859, 672, 112, 785,
+ 645, 863, 803, 350, 139, 93, 354, 99, 820, 908,
+ 609, 772, 154, 274, 580, 184, 79, 626, 630, 742,
+ 653, 282, 762, 623, 680, 81, 927, 626, 789, 125,
+ 411, 521, 938, 300, 821, 78, 343, 175, 128, 250,
+ 170, 774, 972, 275, 999, 639, 495, 78, 352, 126,
+ 857, 956, 358, 619, 580, 124, 737, 594, 701, 612,
+ 669, 112, 134, 694, 363, 992, 809, 743, 168, 974,
+ 944, 375, 748, 52, 600, 747, 642, 182, 862, 81,
+ 344, 805, 988, 739, 511, 655, 814, 334, 249, 515,
+ 897, 955, 664, 981, 649, 113, 974, 459, 893, 228,
+ 433, 837, 553, 268, 926, 240, 102, 654, 459, 51,
+ 686, 754, 806, 760, 493, 403, 415, 394, 687, 700,
+ 946, 670, 656, 610, 738, 392, 760, 799, 887, 653,
+ 978, 321, 576, 617, 626, 502, 894, 679, 243, 440,
+ 680, 879, 194, 572, 640, 724, 926, 56, 204, 700,
+ 707, 151, 457, 449, 797, 195, 791, 558, 945, 679,
+ 297, 59, 87, 824, 713, 663, 412, 693, 342, 606,
+ 134, 108, 571, 364, 631, 212, 174, 643, 304, 329,
+ 343, 97, 430, 751, 497, 314, 983, 374, 822, 928,
+ 140, 206, 73, 263, 980, 736, 876, 478, 430, 305,
+ 170, 514, 364, 692, 829, 82, 855, 953, 676, 246,
+ 369, 970, 294, 750, 807, 827, 150, 790, 288, 923,
+ 804, 378, 215, 828, 592, 281, 565, 555, 710, 82,
+ 896, 831, 547, 261, 524, 462, 293, 465, 502, 56,
+ 661, 821, 976, 991, 658, 869, 905, 758, 745, 193,
+ 768, 550, 608, 933, 378, 286, 215, 979, 792, 961,
+ 61, 688, 793, 644, 986, 403, 106, 366, 905, 644,
+ 372, 567, 466, 434, 645, 210, 389, 550, 919, 135,
+ 780, 773, 635, 389, 707, 100, 626, 958, 165, 504,
+ 920, 176, 193, 713, 857, 265, 203, 50, 668, 108,
+ 645, 990, 626, 197, 510, 357, 358, 850, 858, 364,
+ 936, 638
+ };
+
+ /**
+ * Return the random number at a specific index.
+ *
+ * @param i the index
+ * @return the random number
+ */
+ static int rNums(int i) {
+ return RNUMS[i];
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/package.html
new file mode 100644
index 000000000..fe27e6e66
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/bzip2/package.html
@@ -0,0 +1,24 @@
+<html>
+<body>
+ <p>Provides stream classes for compressing and decompressing
+ streams using the BZip2 algorithm.</p>
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipCompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipCompressorInputStream.java
new file mode 100644
index 000000000..bff7021a1
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipCompressorInputStream.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.gzip;
+
+import java.io.IOException;
+import java.io.EOFException;
+import java.io.InputStream;
+import java.io.DataInputStream;
+import java.io.BufferedInputStream;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+import java.util.zip.CRC32;
+
+import org.apache.commons.compress.compressors.CompressorInputStream;
+
+/**
+ * Input stream that decompresses .gz files.
+ * This supports decompressing concatenated .gz files which is important
+ * when decompressing standalone .gz files.
+ *
+ * {@link java.util.zip.GZIPInputStream} doesn't decompress concatenated .gz
+ * files: it stops after the first member and silently ignores the rest.
+ * It doesn't leave the read position to point to the beginning of the next
+ * member, which makes it difficult to work around the lack of concatenation
+ * support.
+ *
+ * Instead of using GZIPInputStream, this class has its own .gz
+ * container format decoder. The actual decompression is done with
+ * {@link java.util.zip.Inflater}.
+ */
+public class GzipCompressorInputStream extends CompressorInputStream {
+ // Header flags
+ // private static final int FTEXT = 0x01; // Uninteresting for us
+ private static final int FHCRC = 0x02;
+ private static final int FEXTRA = 0x04;
+ private static final int FNAME = 0x08;
+ private static final int FCOMMENT = 0x10;
+ private static final int FRESERVED = 0xE0;
+
+ // Compressed input stream, possibly wrapped in a BufferedInputStream
+ private final InputStream in;
+
+ // True if decompressing multimember streams.
+ private final boolean decompressConcatenated;
+
+ // Buffer to hold the input data
+ private final byte[] buf = new byte[8192];
+
+ // Amount of data in buf.
+ private int bufUsed = 0;
+
+ // Decompressor
+ private Inflater inf = new Inflater(true);
+
+ // CRC32 from uncompressed data
+ private final CRC32 crc = new CRC32();
+
+ private int memberSize;
+
+ // True once everything has been decompressed
+ private boolean endReached = false;
+
+ // used in no-arg read method
+ private final byte[] oneByte = new byte[1];
+
+ /**
+ * Constructs a new input stream that decompresses gzip-compressed data
+ * from the specified input stream.
+ *
+ * This is equivalent to
+ * GzipCompressorInputStream(inputStream, false) and thus
+ * will not decompress concatenated .gz files.
+ *
+ * @param inputStream the InputStream from which this object should
+ * be created
+ *
+ * @throws IOException if the stream could not be created
+ */
+ public GzipCompressorInputStream(InputStream inputStream)
+ throws IOException {
+ this(inputStream, false);
+ }
+
+ /**
+ * Constructs a new input stream that decompresses gzip-compressed data
+ * from the specified input stream.
+ *
+ * If decompressConcatenated is {@code false}:
+ * This decompressor might read more input than it will actually use.
+ * If inputStream supports mark and
+ * reset, then the input position will be adjusted
+ * so that it is right after the last byte of the compressed stream.
+ * If mark isn't supported, the input position will be
+ * undefined.
+ *
+ * @param inputStream the InputStream from which this object should
+ * be created
+ * @param decompressConcatenated
+ * if true, decompress until the end of the input;
+ * if false, stop after the first .gz member
+ *
+ * @throws IOException if the stream could not be created
+ */
+ public GzipCompressorInputStream(InputStream inputStream,
+ boolean decompressConcatenated)
+ throws IOException {
+ // Mark support is strictly needed for concatenated files only,
+ // but it's simpler if it is always available.
+ if (inputStream.markSupported()) {
+ in = inputStream;
+ } else {
+ in = new BufferedInputStream(inputStream);
+ }
+
+ this.decompressConcatenated = decompressConcatenated;
+ init(true);
+ }
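+
+ /*
+ * Editorial sketch (not part of the original sources): to decompress a
+ * file that may consist of several concatenated .gz members, pass true
+ * for decompressConcatenated (the file name is hypothetical):
+ *
+ * InputStream fin = new FileInputStream("logs.gz");
+ * GzipCompressorInputStream gzin =
+ * new GzipCompressorInputStream(fin, true);
+ * // read() now runs through every member instead of stopping
+ * // after the first one
+ */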
+
+ private boolean init(boolean isFirstMember) throws IOException {
+ assert isFirstMember || decompressConcatenated;
+
+ // Check the magic bytes without a possibility of EOFException.
+ int magic0 = in.read();
+ int magic1 = in.read();
+
+ // If end of input was reached after decompressing at least
+ // one .gz member, we have reached the end of the file successfully.
+ if (magic0 == -1 && !isFirstMember) {
+ return false;
+ }
+
+ if (magic0 != 31 || magic1 != 139) {
+ throw new IOException(isFirstMember
+ ? "Input is not in the .gz format"
+ : "Garbage after a valid .gz stream");
+ }
+
+ // Parsing the rest of the header may throw EOFException.
+ DataInputStream inData = new DataInputStream(in);
+ int method = inData.readUnsignedByte();
+ if (method != 8) {
+ throw new IOException("Unsupported compression method "
+ + method + " in the .gz header");
+ }
+
+ int flg = inData.readUnsignedByte();
+ if ((flg & FRESERVED) != 0) {
+ throw new IOException(
+ "Reserved flags are set in the .gz header");
+ }
+
+ inData.readInt(); // mtime, ignored
+ inData.readUnsignedByte(); // extra flags, ignored
+ inData.readUnsignedByte(); // operating system, ignored
+
+ // Extra field, ignored
+ if ((flg & FEXTRA) != 0) {
+ int xlen = inData.readUnsignedByte();
+ xlen |= inData.readUnsignedByte() << 8;
+
+ // This isn't as efficient as calling in.skip would be,
+ // but it's lazier to handle unexpected end of input this way.
+ // Most files don't have an extra field anyway.
+ while (xlen-- > 0) {
+ inData.readUnsignedByte();
+ }
+ }
+
+ // Original file name, ignored
+ if ((flg & FNAME) != 0) {
+ readToNull(inData);
+ }
+
+ // Comment, ignored
+ if ((flg & FCOMMENT) != 0) {
+ readToNull(inData);
+ }
+
+ // Header "CRC16" which is actually a truncated CRC32 (which isn't
+ // as good as real CRC16). I don't know if any encoder implementation
+ // sets this, so it's not worth trying to verify it. GNU gzip 1.4
+ // doesn't support this field, but zlib seems to be able to at least
+ // skip over it.
+ if ((flg & FHCRC) != 0) {
+ inData.readShort();
+ }
+
+ // Reset
+ inf.reset();
+ crc.reset();
+ memberSize = 0;
+
+ return true;
+ }
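+
+ /*
+ * Editorial note (not part of the original sources): the fixed part of
+ * the member header parsed above is, per RFC 1952:
+ *
+ * ID1 = 0x1f, ID2 = 0x8b, CM (8 = deflate), FLG,
+ * MTIME (4 bytes), XFL, OS
+ *
+ * followed by the optional FEXTRA, FNAME, FCOMMENT and FHCRC fields in
+ * that order, all of which init skips over.
+ */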
+
+ private void readToNull(DataInputStream inData) throws IOException {
+ while (inData.readUnsignedByte() != 0x00) { // NOPMD
+ }
+ }
+
+ @Override
+ public int read() throws IOException {
+ return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @since 1.1
+ */
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (endReached) {
+ return -1;
+ }
+
+ int size = 0;
+
+ while (len > 0) {
+ if (inf.needsInput()) {
+ // Remember the current position because we may need to
+ // rewind after reading too much input.
+ in.mark(buf.length);
+
+ bufUsed = in.read(buf);
+ if (bufUsed == -1) {
+ throw new EOFException();
+ }
+
+ inf.setInput(buf, 0, bufUsed);
+ }
+
+ int ret;
+ try {
+ ret = inf.inflate(b, off, len);
+ } catch (DataFormatException e) {
+ throw new IOException("Gzip-compressed data is corrupt");
+ }
+
+ crc.update(b, off, ret);
+ memberSize += ret;
+ off += ret;
+ len -= ret;
+ size += ret;
+ count(ret);
+
+ if (inf.finished()) {
+ // We may have read too many bytes. Rewind the read
+ // position to match the actual amount used.
+ //
+ // NOTE: The "if" is there just in case. Since we used
+ // in.mark earlier, it should always skip enough.
+ in.reset();
+
+ int skipAmount = bufUsed - inf.getRemaining();
+ if (in.skip(skipAmount) != skipAmount) {
+ throw new IOException();
+ }
+
+ bufUsed = 0;
+
+ DataInputStream inData = new DataInputStream(in);
+
+ // CRC32
+ long crcStored = 0;
+ for (int i = 0; i < 4; ++i) {
+ crcStored |= (long)inData.readUnsignedByte() << (i * 8);
+ }
+
+ if (crcStored != crc.getValue()) {
+ throw new IOException("Gzip-compressed data is corrupt "
+ + "(CRC32 error)");
+ }
+
+ // Uncompressed size modulo 2^32 (ISIZE in the spec)
+ int isize = 0;
+ for (int i = 0; i < 4; ++i) {
+ isize |= inData.readUnsignedByte() << (i * 8);
+ }
+
+ if (isize != memberSize) {
+ throw new IOException("Gzip-compressed data is corrupt"
+ + "(uncompressed size mismatch)");
+ }
+
+ // See if this is the end of the file.
+ if (!decompressConcatenated || !init(false)) {
+ inf.end();
+ inf = null;
+ endReached = true;
+ return size == 0 ? -1 : size;
+ }
+ }
+ }
+
+ return size;
+ }
+
+ /**
+ * Checks if the signature matches what is expected for a .gz file.
+ *
+ * @param signature the bytes to check
+ * @param length the number of bytes to check
+ * @return true if this is a .gz stream, false otherwise
+ *
+ * @since 1.1
+ */
+ public static boolean matches(byte[] signature, int length) {
+
+ if (length < 2) {
+ return false;
+ }
+
+ if (signature[0] != 31) {
+ return false;
+ }
+
+ if (signature[1] != -117) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Closes the input stream (unless it is System.in).
+ *
+ * @since 1.2
+ */
+ @Override
+ public void close() throws IOException {
+ if (inf != null) {
+ inf.end();
+ inf = null;
+ }
+
+ if (this.in != System.in) {
+ this.in.close();
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipCompressorOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipCompressorOutputStream.java
new file mode 100644
index 000000000..43c62e7d6
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipCompressorOutputStream.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.gzip;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.zip.CRC32;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.commons.compress.compressors.CompressorOutputStream;
+
+/**
+ * Compressed output stream using the gzip format. This implementation improves
+ * over the standard {@link GZIPOutputStream} class by allowing
+ * the configuration of the compression level and the header metadata (filename,
+ * comment, modification time, operating system and extra flags).
+ *
+ * @see <a href="https://tools.ietf.org/html/rfc1952">GZIP File Format Specification</a>
+ */
+public class GzipCompressorOutputStream extends CompressorOutputStream {
+
+ /** Header flag indicating a file name follows the header */
+ private static final int FNAME = 1 << 3;
+
+ /** Header flag indicating a comment follows the header */
+ private static final int FCOMMENT = 1 << 4;
+
+ /** The underlying stream */
+ private final OutputStream out;
+
+ /** Deflater used to compress the data */
+ private final Deflater deflater;
+
+ /** The buffer receiving the compressed data from the deflater */
+ private final byte[] deflateBuffer = new byte[512];
+
+ /** Indicates if the stream has been closed */
+ private boolean closed;
+
+ /** The checksum of the uncompressed data */
+ private final CRC32 crc = new CRC32();
+
+ /**
+ * Creates a gzip compressed output stream with the default parameters.
+ */
+ public GzipCompressorOutputStream(OutputStream out) throws IOException {
+ this(out, new GzipParameters());
+ }
+
+ /**
+ * Creates a gzip compressed output stream with the specified parameters.
+ *
+ * @since 1.7
+ */
+ public GzipCompressorOutputStream(OutputStream out, GzipParameters parameters) throws IOException {
+ this.out = out;
+ this.deflater = new Deflater(parameters.getCompressionLevel(), true);
+
+ writeHeader(parameters);
+ }
+
+ private void writeHeader(GzipParameters parameters) throws IOException {
+ String filename = parameters.getFilename();
+ String comment = parameters.getComment();
+
+ ByteBuffer buffer = ByteBuffer.allocate(10);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ buffer.putShort((short) GZIPInputStream.GZIP_MAGIC);
+ buffer.put((byte) Deflater.DEFLATED); // compression method (8: deflate)
+ buffer.put((byte) ((filename != null ? FNAME : 0) | (comment != null ? FCOMMENT : 0))); // flags
+ buffer.putInt((int) (parameters.getModificationTime() / 1000));
+
+ // extra flags
+ int compressionLevel = parameters.getCompressionLevel();
+ if (compressionLevel == Deflater.BEST_COMPRESSION) {
+ buffer.put((byte) 2);
+ } else if (compressionLevel == Deflater.BEST_SPEED) {
+ buffer.put((byte) 4);
+ } else {
+ buffer.put((byte) 0);
+ }
+
+ buffer.put((byte) parameters.getOperatingSystem());
+
+ out.write(buffer.array());
+
+ if (filename != null) {
+ out.write(filename.getBytes("ISO-8859-1"));
+ out.write(0);
+ }
+
+ if (comment != null) {
+ out.write(comment.getBytes("ISO-8859-1"));
+ out.write(0);
+ }
+ }
+
+ private void writeTrailer() throws IOException {
+ ByteBuffer buffer = ByteBuffer.allocate(8);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ buffer.putInt((int) crc.getValue());
+ buffer.putInt(deflater.getTotalIn());
+
+ out.write(buffer.array());
+ }
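+
+ /*
+ * Editorial note (not part of the original sources): writeTrailer emits
+ * the RFC 1952 member trailer, the CRC32 of the uncompressed data
+ * followed by ISIZE (the uncompressed size modulo 2^32), both
+ * little-endian. Deflater#getTotalIn returns an int, so it wraps
+ * exactly as ISIZE requires.
+ */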
+
+ @Override
+ public void write(int b) throws IOException {
+ write(new byte[]{(byte) (b & 0xff)}, 0, 1);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @since 1.1
+ */
+ @Override
+ public void write(byte[] buffer) throws IOException {
+ write(buffer, 0, buffer.length);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @since 1.1
+ */
+ @Override
+ public void write(byte[] buffer, int offset, int length) throws IOException {
+ if (deflater.finished()) {
+ throw new IOException("Cannot write more data, the end of the compressed data stream has been reached");
+
+ } else if (length > 0) {
+ deflater.setInput(buffer, offset, length);
+
+ while (!deflater.needsInput()) {
+ deflate();
+ }
+
+ crc.update(buffer, offset, length);
+ }
+ }
+
+ private void deflate() throws IOException {
+ int length = deflater.deflate(deflateBuffer, 0, deflateBuffer.length);
+ if (length > 0) {
+ out.write(deflateBuffer, 0, length);
+ }
+ }
+
+ /**
+ * Finishes writing compressed data to the underlying stream without closing it.
+ *
+ * @since 1.7
+ */
+ public void finish() throws IOException {
+ if (!deflater.finished()) {
+ deflater.finish();
+
+ while (!deflater.finished()) {
+ deflate();
+ }
+
+ writeTrailer();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @since 1.7
+ */
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!closed) {
+ finish();
+ deflater.end();
+ out.close();
+ closed = true;
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipParameters.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipParameters.java
new file mode 100644
index 000000000..ef9a1afeb
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipParameters.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.gzip;
+
+import java.util.zip.Deflater;
+
+/**
+ * Parameters for the GZIP compressor.
+ *
+ * @since 1.7
+ */
+public class GzipParameters {
+
+ private int compressionLevel = Deflater.DEFAULT_COMPRESSION;
+ private long modificationTime;
+ private String filename;
+ private String comment;
+ private int operatingSystem = 255; // Unknown OS by default
+
+ public int getCompressionLevel() {
+ return compressionLevel;
+ }
+
+ /**
+ * Sets the compression level.
+ *
+ * @param compressionLevel the compression level (between 0 and 9)
+ * @see Deflater#NO_COMPRESSION
+ * @see Deflater#BEST_SPEED
+ * @see Deflater#DEFAULT_COMPRESSION
+ * @see Deflater#BEST_COMPRESSION
+ */
+ public void setCompressionLevel(int compressionLevel) {
+ if (compressionLevel < -1 || compressionLevel > 9) {
+ throw new IllegalArgumentException("Invalid gzip compression level: " + compressionLevel);
+ }
+ this.compressionLevel = compressionLevel;
+ }
+
+ public long getModificationTime() {
+ return modificationTime;
+ }
+
+ /**
+ * Sets the modification time of the compressed file.
+ *
+ * @param modificationTime the modification time, in milliseconds
+ */
+ public void setModificationTime(long modificationTime) {
+ this.modificationTime = modificationTime;
+ }
+
+ public String getFilename() {
+ return filename;
+ }
+
+ /**
+ * Sets the name of the compressed file.
+ *
+ * @param filename the name of the file without the directory path
+ */
+ public void setFilename(String filename) {
+ this.filename = filename;
+ }
+
+ public String getComment() {
+ return comment;
+ }
+
+ public void setComment(String comment) {
+ this.comment = comment;
+ }
+
+ public int getOperatingSystem() {
+ return operatingSystem;
+ }
+
+ /**
+ * Sets the operating system on which the compression took place.
+ * The defined values are:
+ * <ul>
+ * <li>0: FAT filesystem (MS-DOS, OS/2, NT/Win32)</li>
+ * <li>1: Amiga</li>
+ * <li>2: VMS (or OpenVMS)</li>
+ * <li>3: Unix</li>
+ * <li>4: VM/CMS</li>
+ * <li>5: Atari TOS</li>
+ * <li>6: HPFS filesystem (OS/2, NT)</li>
+ * <li>7: Macintosh</li>
+ * <li>8: Z-System</li>
+ * <li>9: CP/M</li>
+ * <li>10: TOPS-20</li>
+ * <li>11: NTFS filesystem (NT)</li>
+ * <li>12: QDOS</li>
+ * <li>13: Acorn RISCOS</li>
+ * <li>255: Unknown</li>
+ * </ul>
+ *
+ * @param operatingSystem the code of the operating system
+ */
+ public void setOperatingSystem(int operatingSystem) {
+ this.operatingSystem = operatingSystem;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipUtils.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipUtils.java
new file mode 100644
index 000000000..40be4374c
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/GzipUtils.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.gzip;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import org.apache.commons.compress.compressors.FileNameUtil;
+
+/**
+ * Utility code for the gzip compression format.
+ * @ThreadSafe
+ */
+public class GzipUtils {
+
+ private static final FileNameUtil fileNameUtil;
+
+ static {
+ // using LinkedHashMap so .tgz is preferred over .taz as
+ // compressed extension of .tar as FileNameUtil will use the
+ // first one found
+ Map<String, String> uncompressSuffix =
+ new LinkedHashMap<String, String>();
+ uncompressSuffix.put(".tgz", ".tar");
+ uncompressSuffix.put(".taz", ".tar");
+ uncompressSuffix.put(".svgz", ".svg");
+ uncompressSuffix.put(".cpgz", ".cpio");
+ uncompressSuffix.put(".wmz", ".wmf");
+ uncompressSuffix.put(".emz", ".emf");
+ uncompressSuffix.put(".gz", "");
+ uncompressSuffix.put(".z", "");
+ uncompressSuffix.put("-gz", "");
+ uncompressSuffix.put("-z", "");
+ uncompressSuffix.put("_z", "");
+ fileNameUtil = new FileNameUtil(uncompressSuffix, ".gz");
+ }
+
+ /** Private constructor to prevent instantiation of this utility class. */
+ private GzipUtils() {
+ }
+
+ /**
+ * Detects common gzip suffixes in the given filename.
+ *
+ * @param filename name of a file
+ * @return {@code true} if the filename has a common gzip suffix,
+ * {@code false} otherwise
+ */
+ public static boolean isCompressedFilename(String filename) {
+ return fileNameUtil.isCompressedFilename(filename);
+ }
+
+ /**
+ * Maps the given name of a gzip-compressed file to the name that the
+ * file should have after uncompression. Commonly used file type specific
+ * suffixes like ".tgz" or ".svgz" are automatically detected and
+ * correctly mapped. For example the name "package.tgz" is mapped to
+ * "package.tar". And any filenames with the generic ".gz" suffix
+ * (or any other generic gzip suffix) is mapped to a name without that
+ * suffix. If no gzip suffix is detected, then the filename is returned
+ * unmapped.
+ *
+ * @param filename name of a file
+ * @return name of the corresponding uncompressed file
+ */
+ public static String getUncompressedFilename(String filename) {
+ return fileNameUtil.getUncompressedFilename(filename);
+ }
+
+ /**
+ * Maps the given filename to the name that the file should have after
+ * compression with gzip. Common file types with custom suffixes for
+ * compressed versions are automatically detected and correctly mapped.
+ * For example the name "package.tar" is mapped to "package.tgz". If no
+ * custom mapping is applicable, then the default ".gz" suffix is appended
+ * to the filename.
+ *
+ * @param filename name of a file
+ * @return name of the corresponding compressed file
+ */
+ public static String getCompressedFilename(String filename) {
+ return fileNameUtil.getCompressedFilename(filename);
+ }
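+
+ /*
+ * Editorial sketch (not part of the original sources): with the suffix
+ * table registered above the two mappings behave, for example, like:
+ *
+ * GzipUtils.getUncompressedFilename("package.tgz"); // "package.tar"
+ * GzipUtils.getUncompressedFilename("logo.svgz"); // "logo.svg"
+ * GzipUtils.getCompressedFilename("package.tar"); // "package.tgz"
+ * GzipUtils.getCompressedFilename("notes.txt"); // "notes.txt.gz"
+ */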
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/package.html
new file mode 100644
index 000000000..e18b50f2f
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/gzip/package.html
@@ -0,0 +1,29 @@
+<html>
+<body>
+ <p>Provides stream classes for compressing and decompressing
+ streams using the GZip algorithm.</p>
+
+ <p>The classes in this package are wrappers around {@link
+ java.util.zip.GZIPInputStream java.util.zip.GZIPInputStream} and
+ {@link java.util.zip.GZIPOutputStream
+ java.util.zip.GZIPOutputStream}.</p>
+</body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/InMemoryCachingStreamBridge.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/InMemoryCachingStreamBridge.java
new file mode 100644
index 000000000..e1fdc2cba
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/InMemoryCachingStreamBridge.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * StreamSwitcher that caches all data written to the output side in
+ * memory.
+ * @since 1.3
+ */
+class InMemoryCachingStreamBridge extends StreamBridge {
+ InMemoryCachingStreamBridge() {
+ super(new ByteArrayOutputStream());
+ }
+
+ @Override
+ InputStream getInputView() throws IOException {
+ return new ByteArrayInputStream(((ByteArrayOutputStream) out)
+ .toByteArray());
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorInputStream.java
new file mode 100644
index 000000000..fa04aef35
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorInputStream.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.File;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Map;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Pack200;
+
+import org.apache.commons.compress.compressors.CompressorInputStream;
+
+/**
+ * An input stream that decompresses from the Pack200 format to be read
+ * as any other stream.
+ *
+ * The {@link CompressorInputStream#getCount getCount} and {@link
+ * CompressorInputStream#getBytesRead getBytesRead} methods always
+ * return 0.
+ *
+ * @NotThreadSafe
+ * @since 1.3
+ */
+public class Pack200CompressorInputStream extends CompressorInputStream {
+ private final InputStream originalInput;
+ private final StreamBridge streamBridge;
+
+ /**
+ * Decompresses the given stream, caching the decompressed data in
+ * memory.
+ *
+ * When reading from a file the File-arg constructor may
+ * provide better performance.
+ */
+ public Pack200CompressorInputStream(final InputStream in)
+ throws IOException {
+ this(in, Pack200Strategy.IN_MEMORY);
+ }
+
+ /**
+ * Decompresses the given stream using the given strategy to cache
+ * the results.
+ *
+ * When reading from a file the File-arg constructor may
+ * provide better performance.
+ */
+ public Pack200CompressorInputStream(final InputStream in,
+ final Pack200Strategy mode)
+ throws IOException {
+ this(in, null, mode, null);
+ }
+
+ /**
+ * Decompresses the given stream, caching the decompressed data in
+ * memory and using the given properties.
+ *
+ * When reading from a file the File-arg constructor may
+ * provide better performance.
+ */
+ public Pack200CompressorInputStream(final InputStream in,
+ final Map<String, String> props)
+ throws IOException {
+ this(in, Pack200Strategy.IN_MEMORY, props);
+ }
+
+ /**
+ * Decompresses the given stream using the given strategy to cache
+ * the results and the given properties.
+ *
+ * When reading from a file the File-arg constructor may
+ * provide better performance.
+ */
+ public Pack200CompressorInputStream(final InputStream in,
+ final Pack200Strategy mode,
+ final Map<String, String> props)
+ throws IOException {
+ this(in, null, mode, props);
+ }
+
+ /**
+ * Decompresses the given file, caching the decompressed data in
+ * memory.
+ */
+ public Pack200CompressorInputStream(final File f) throws IOException {
+ this(f, Pack200Strategy.IN_MEMORY);
+ }
+
+ /**
+ * Decompresses the given file using the given strategy to cache
+ * the results.
+ */
+ public Pack200CompressorInputStream(final File f, final Pack200Strategy mode)
+ throws IOException {
+ this(null, f, mode, null);
+ }
+
+ /**
+ * Decompresses the given file, caching the decompressed data in
+ * memory and using the given properties.
+ */
+ public Pack200CompressorInputStream(final File f,
+ final Map<String, String> props)
+ throws IOException {
+ this(f, Pack200Strategy.IN_MEMORY, props);
+ }
+
+ /**
+ * Decompresses the given file using the given strategy to cache
+ * the results and the given properties.
+ */
+ public Pack200CompressorInputStream(final File f, final Pack200Strategy mode,
+ final Map<String, String> props)
+ throws IOException {
+ this(null, f, mode, props);
+ }
+
+ private Pack200CompressorInputStream(final InputStream in, final File f,
+ final Pack200Strategy mode,
+ final Map<String, String> props)
+ throws IOException {
+ originalInput = in;
+ streamBridge = mode.newStreamBridge();
+ JarOutputStream jarOut = new JarOutputStream(streamBridge);
+ Pack200.Unpacker u = Pack200.newUnpacker();
+ if (props != null) {
+ u.properties().putAll(props);
+ }
+ if (f == null) {
+ u.unpack(new FilterInputStream(in) {
+ @Override
+ public void close() {
+ // unpack would close this stream but we
+ // want to give the user code more control
+ }
+ },
+ jarOut);
+ } else {
+ u.unpack(f, jarOut);
+ }
+ jarOut.close();
+ }
+
+ @Override
+ public int read() throws IOException {
+ return streamBridge.getInput().read();
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return streamBridge.getInput().read(b);
+ }
+
+ @Override
+ public int read(byte[] b, int off, int count) throws IOException {
+ return streamBridge.getInput().read(b, off, count);
+ }
+
+ @Override
+ public int available() throws IOException {
+ return streamBridge.getInput().available();
+ }
+
+ @Override
+ public boolean markSupported() {
+ try {
+ return streamBridge.getInput().markSupported();
+ } catch (IOException ex) {
+ return false;
+ }
+ }
+
+ @Override
+ public void mark(int limit) {
+ try {
+ streamBridge.getInput().mark(limit);
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ @Override
+ public void reset() throws IOException {
+ streamBridge.getInput().reset();
+ }
+
+ @Override
+ public long skip(long count) throws IOException {
+ return streamBridge.getInput().skip(count);
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ streamBridge.stop();
+ } finally {
+ if (originalInput != null) {
+ originalInput.close();
+ }
+ }
+ }
+
+ private static final byte[] CAFE_DOOD = new byte[] {
+ (byte) 0xCA, (byte) 0xFE, (byte) 0xD0, (byte) 0x0D
+ };
+ private static final int SIG_LENGTH = CAFE_DOOD.length;
+
+ /**
+ * Checks if the signature matches what is expected for a pack200
+ * file (0xCAFED00D).
+ *
+ * @param signature
+ * the bytes to check
+ * @param length
+ * the number of bytes to check
+ * @return true, if this stream is a pack200 compressed stream,
+ * false otherwise
+ */
+ public static boolean matches(byte[] signature, int length) {
+ if (length < SIG_LENGTH) {
+ return false;
+ }
+
+ for (int i = 0; i < SIG_LENGTH; i++) {
+ if (signature[i] != CAFE_DOOD[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorOutputStream.java
new file mode 100644
index 000000000..cfb315d9e
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorOutputStream.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Map;
+import java.util.jar.JarInputStream;
+import java.util.jar.Pack200;
+
+import org.apache.commons.compress.compressors.CompressorOutputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * An output stream that compresses using the Pack200 format.
+ *
+ * @NotThreadSafe
+ * @since 1.3
+ */
+public class Pack200CompressorOutputStream extends CompressorOutputStream {
+ private boolean finished = false;
+ private final OutputStream originalOutput;
+ private final StreamBridge streamBridge;
+ private final Map<String, String> properties;
+
+ /**
+ * Compresses the given stream, caching the data to be
+ * compressed in memory.
+ */
+ public Pack200CompressorOutputStream(final OutputStream out)
+ throws IOException {
+ this(out, Pack200Strategy.IN_MEMORY);
+ }
+
+ /**
+ * Compresses the given stream using the given strategy to cache
+ * the results.
+ */
+ public Pack200CompressorOutputStream(final OutputStream out,
+ final Pack200Strategy mode)
+ throws IOException {
+ this(out, mode, null);
+ }
+
+ /**
+ * Compresses the given stream, caching the data to be compressed
+ * in memory and using the given properties.
+ */
+ public Pack200CompressorOutputStream(final OutputStream out,
+ final Map<String, String> props)
+ throws IOException {
+ this(out, Pack200Strategy.IN_MEMORY, props);
+ }
+
+ /**
+ * Compresses the given stream using the given strategy to cache
+ * the results and the given properties.
+ */
+ public Pack200CompressorOutputStream(final OutputStream out,
+ final Pack200Strategy mode,
+ final Map<String, String> props)
+ throws IOException {
+ originalOutput = out;
+ streamBridge = mode.newStreamBridge();
+ properties = props;
+ }
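+
+ // Illustrative sketch (not part of the class): a complete JAR must be
+ // written to this stream; nothing reaches the underlying OutputStream
+ // until finish()/close(). The file name "out.pack" is made up.
+ //
+ //   OutputStream packed = new FileOutputStream("out.pack");
+ //   Pack200CompressorOutputStream pOut =
+ //       new Pack200CompressorOutputStream(packed);
+ //   try {
+ //       // write the bytes of a valid JAR archive to pOut here
+ //   } finally {
+ //       pOut.close(); // triggers the actual Pack200 packing
+ //   }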
+
+ @Override
+ public void write(int b) throws IOException {
+ streamBridge.write(b);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ streamBridge.write(b);
+ }
+
+ @Override
+ public void write(byte[] b, int from, int length) throws IOException {
+ streamBridge.write(b, from, length);
+ }
+
+ @Override
+ public void close() throws IOException {
+ finish();
+ try {
+ streamBridge.stop();
+ } finally {
+ originalOutput.close();
+ }
+ }
+
+ public void finish() throws IOException {
+ if (!finished) {
+ finished = true;
+ Pack200.Packer p = Pack200.newPacker();
+ if (properties != null) {
+ p.properties().putAll(properties);
+ }
+ JarInputStream ji = null;
+ boolean success = false;
+ try {
+ p.pack(ji = new JarInputStream(streamBridge.getInput()),
+ originalOutput);
+ success = true;
+ } finally {
+ if (!success) {
+ IOUtils.closeQuietly(ji);
+ }
+ }
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200Strategy.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200Strategy.java
new file mode 100644
index 000000000..dba199296
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200Strategy.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.IOException;
+
+/**
+ * The different modes the Pack200 streams can use to wrap input and
+ * output.
+ * @since 1.3
+ */
+public enum Pack200Strategy {
+ /** Cache output in memory */
+ IN_MEMORY() {
+ @Override
+ StreamBridge newStreamBridge() {
+ return new InMemoryCachingStreamBridge();
+ }
+ },
+ /** Cache output in a temporary file */
+ TEMP_FILE() {
+ @Override
+ StreamBridge newStreamBridge() throws IOException {
+ return new TempFileCachingStreamBridge();
+ }
+ };
+
+ abstract StreamBridge newStreamBridge() throws IOException;
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200Utils.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200Utils.java
new file mode 100644
index 000000000..1f944dacc
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/Pack200Utils.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.jar.JarFile;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Pack200;
+
+/**
+ * Utility methods for Pack200.
+ *
+ * @ThreadSafe
+ * @since 1.3
+ */
+public class Pack200Utils {
+ private Pack200Utils() { }
+
+ /**
+ * Normalizes a JAR archive in-place so it can be safely signed
+ * and packed.
+ *
+ * As stated in Pack200.Packer's
+ * javadocs, applying Pack200 compression to a JAR archive will
+ * in general invalidate its signatures. In order to prepare a
+ * JAR for signing it should be "normalized" by packing and
+ * unpacking it, which is what this method does.
+ *
+ * Note this method implicitly sets the segment limit to
+ * -1.
+ *
+ * @param jar the JAR archive to normalize
+ */
+ public static void normalize(File jar)
+ throws IOException {
+ normalize(jar, jar, null);
+ }
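+
+ // Illustrative sketch (not part of the class): normalize before
+ // signing so a later pack/unpack cycle keeps the signature valid.
+ // The file name "my.jar" is made up.
+ //
+ //   Pack200Utils.normalize(new File("my.jar"));
+ //   // ... sign my.jar here ...
+ //   // packing and unpacking the signed JAR now round-trips safely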
+
+ /**
+ * Normalizes a JAR archive in-place so it can be safely signed
+ * and packed.
+ *
+ * As stated in Pack200.Packer's
+ * javadocs, applying Pack200 compression to a JAR archive will
+ * in general invalidate its signatures. In order to prepare a
+ * JAR for signing it should be "normalized" by packing and
+ * unpacking it, which is what this method does.
+ *
+ * @param jar the JAR archive to normalize
+ * @param props properties to set for the pack operation. This
+ * method will implicitly set the segment limit to -1.
+ */
+ public static void normalize(File jar, Map<String, String> props)
+ throws IOException {
+ normalize(jar, jar, props);
+ }
+
+ /**
+ * Normalizes a JAR archive so it can be safely signed and packed.
+ *
+ * As stated in Pack200.Packer's
+ * javadocs, applying Pack200 compression to a JAR archive will
+ * in general invalidate its signatures. In order to prepare a
+ * JAR for signing it should be "normalized" by packing and
+ * unpacking it, which is what this method does.
+ *
+ * This method does not replace the existing archive but creates
+ * a new one.
+ *
+ * Note this method implicitly sets the segment limit to
+ * -1.
+ *
+ * @param from the JAR archive to normalize
+ * @param to the normalized archive
+ */
+ public static void normalize(File from, File to)
+ throws IOException {
+ normalize(from, to, null);
+ }
+
+ /**
+ * Normalizes a JAR archive so it can be safely signed and packed.
+ *
+ * As stated in Pack200.Packer's
+ * javadocs, applying Pack200 compression to a JAR archive will
+ * in general invalidate its signatures. In order to prepare a
+ * JAR for signing it should be "normalized" by packing and
+ * unpacking it, which is what this method does.
+ *
+ * This method does not replace the existing archive but creates
+ * a new one.
+ *
+ * @param from the JAR archive to normalize
+ * @param to the normalized archive
+ * @param props properties to set for the pack operation. This
+ * method will implicitly set the segment limit to -1.
+ */
+ public static void normalize(File from, File to, Map<String, String> props)
+ throws IOException {
+ if (props == null) {
+ props = new HashMap<String, String>();
+ }
+ props.put(Pack200.Packer.SEGMENT_LIMIT, "-1");
+ File f = File.createTempFile("commons-compress", "pack200normalize");
+ f.deleteOnExit();
+ try {
+ OutputStream os = new FileOutputStream(f);
+ JarFile j = null;
+ try {
+ Pack200.Packer p = Pack200.newPacker();
+ p.properties().putAll(props);
+ p.pack(j = new JarFile(from), os);
+ j = null;
+ os.close();
+ os = null;
+
+ Pack200.Unpacker u = Pack200.newUnpacker();
+ os = new JarOutputStream(new FileOutputStream(to));
+ u.unpack(f, (JarOutputStream) os);
+ } finally {
+ if (j != null) {
+ j.close();
+ }
+ if (os != null) {
+ os.close();
+ }
+ }
+ } finally {
+ f.delete();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/StreamBridge.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/StreamBridge.java
new file mode 100644
index 000000000..293e0bb1e
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/StreamBridge.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Provides an InputStream to read all data written to this
+ * OutputStream.
+ *
+ * @ThreadSafe
+ * @since 1.3
+ */
+abstract class StreamBridge extends FilterOutputStream {
+ private InputStream input;
+ private final Object INPUT_LOCK = new Object();
+
+ protected StreamBridge(OutputStream out) {
+ super(out);
+ }
+
+ protected StreamBridge() {
+ this(null);
+ }
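+
+ // Typical lifecycle (descriptive note, not upstream documentation):
+ // 1. data is written through the OutputStream side via write(...),
+ // 2. getInput() flips the bridge and exposes the cached data,
+ // 3. stop() closes both sides and releases the cache.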
+
+ /**
+ * Provides the input view.
+ */
+ InputStream getInput() throws IOException {
+ synchronized (INPUT_LOCK) {
+ if (input == null) {
+ input = getInputView();
+ }
+ }
+ return input;
+ }
+
+ /**
+ * Creates the input view.
+ */
+ abstract InputStream getInputView() throws IOException;
+
+ /**
+ * Closes input and output and releases all associated resources.
+ */
+ void stop() throws IOException {
+ close();
+ synchronized (INPUT_LOCK) {
+ if (input != null) {
+ input.close();
+ input = null;
+ }
+ }
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/TempFileCachingStreamBridge.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/TempFileCachingStreamBridge.java
new file mode 100644
index 000000000..b609b50fc
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/TempFileCachingStreamBridge.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.commons.compress.compressors.pack200;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * StreamSwitcher that caches all data written to the output side in
+ * a temporary file.
+ * @since 1.3
+ */
+class TempFileCachingStreamBridge extends StreamBridge {
+ private final File f;
+
+ TempFileCachingStreamBridge() throws IOException {
+ f = File.createTempFile("commons-compress", "packtemp");
+ f.deleteOnExit();
+ out = new FileOutputStream(f);
+ }
+
+ @Override
+ InputStream getInputView() throws IOException {
+ out.close();
+ return new FileInputStream(f) {
+ @Override
+ public void close() throws IOException {
+ super.close();
+ f.delete();
+ }
+ };
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/package.html
new file mode 100644
index 000000000..dfbcb88e6
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/pack200/package.html
@@ -0,0 +1,82 @@
+
+
+
+ Provides stream classes for compressing and decompressing
+ streams using the Pack200 algorithm used to compress Java
+ archives.
+
+ The streams of this package only work on JAR archives, i.e. a
+ {@link
+ org.apache.commons.compress.compressors.pack200.Pack200CompressorOutputStream
+ Pack200CompressorOutputStream} expects to be wrapped around a
+ stream that a valid JAR archive will be written to and a {@link
+ org.apache.commons.compress.compressors.pack200.Pack200CompressorInputStream
+ Pack200CompressorInputStream} provides a stream to read from a
+ JAR archive.
+
+ JAR archives compressed with Pack200 will in general be
+ different from the original archive when decompressed again.
+ For details see
+ the API
+ documentation of Pack200.
+
+ The streams of this package work on non-deflated streams,
+ i.e. archives like those created with the --no-gzip
+ option of the JDK's pack200 command line tool. If
+ you want to work on deflated streams you must use an additional
+ stream layer - for example by using Apache Commons Compress'
+ gzip package.
+
+ The Pack200 API provided by the Java class library doesn't lend
+ itself to real stream
+ processing. Pack200CompressorInputStream will
+ uncompress its input immediately and then provide
+ an InputStream to a cached result.
+ Likewise Pack200CompressorOutputStream will not
+ write anything to the given OutputStream
+ until finish or close is called - at
+ which point the cached output written so far gets
+ compressed.
+
+ Two different caching modes are available - "in memory", which
+ is the default, and "temporary file". By default data is cached
+ in memory but you should switch to the temporary file option if
+ your archives are really big.
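+
+ A minimal sketch of selecting the temporary-file mode (the
+ constructor shown exists in this package; the file name is
+ illustrative):
+
+ InputStream in =
+ new Pack200CompressorInputStream(new File("big.pack"),
+ Pack200Strategy.TEMP_FILE);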
+
+ Given there always is an intermediate result
+ the getBytesRead and getCount methods
+ of Pack200CompressorInputStream are meaningless
+ (read from the real stream or from the intermediate result?)
+ and always return 0.
+
+ During development of the initial version several attempts have
+ been made to use a real streaming API based for example
+ on Piped(In|Out)putStream or explicit stream
+ pumping like Commons Exec's InputStreamPumper but
+ they have all failed because they rely on the output end to be
+ consumed completely or else the (un)pack will block
+ forever. Especially for Pack200CompressorInputStream it is
+ very likely that it will be wrapped in
+ a ZipArchiveInputStream which will never read the
+ archive completely as it is not interested in the ZIP central
+ directory data at the end of the JAR archive.
+
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/package.html
new file mode 100644
index 000000000..7b7d504b9
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/package.html
@@ -0,0 +1,24 @@
+
+
+
+ Provides a unified API and factories for dealing with
+ compressed streams.
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorInputStream.java
new file mode 100644
index 000000000..65deab73c
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorInputStream.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.snappy;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PushbackInputStream;
+import java.util.Arrays;
+
+import org.apache.commons.compress.compressors.CompressorInputStream;
+import org.apache.commons.compress.utils.BoundedInputStream;
+import org.apache.commons.compress.utils.IOUtils;
+
+/**
+ * CompressorInputStream for the framing Snappy format.
+ *
+ * Based on the "spec" in the version "Last revised: 2013-10-25"
+ *
+ * @see Snappy framing format description
+ * @since 1.7
+ */
+public class FramedSnappyCompressorInputStream extends CompressorInputStream {
+ /**
+ * package private for tests only.
+ */
+ static final long MASK_OFFSET = 0xa282ead8L;
+
+ private static final int STREAM_IDENTIFIER_TYPE = 0xff;
+ private static final int COMPRESSED_CHUNK_TYPE = 0;
+ private static final int UNCOMPRESSED_CHUNK_TYPE = 1;
+ private static final int PADDING_CHUNK_TYPE = 0xfe;
+ private static final int MIN_UNSKIPPABLE_TYPE = 2;
+ private static final int MAX_UNSKIPPABLE_TYPE = 0x7f;
+ private static final int MAX_SKIPPABLE_TYPE = 0xfd;
+
+ private static final byte[] SZ_SIGNATURE = new byte[] {
+ (byte) STREAM_IDENTIFIER_TYPE, // tag
+ 6, 0, 0, // length
+ 's', 'N', 'a', 'P', 'p', 'Y'
+ };
+
+ /** The underlying stream to read compressed data from */
+ private final PushbackInputStream in;
+
+ private SnappyCompressorInputStream currentCompressedChunk;
+
+ // used in no-arg read method
+ private final byte[] oneByte = new byte[1];
+
+ private boolean endReached, inUncompressedChunk;
+
+ private int uncompressedBytesRemaining;
+ private long expectedChecksum = -1;
+ private PureJavaCrc32C checksum = new PureJavaCrc32C();
+
+ /**
+ * Constructs a new input stream that decompresses snappy-framed-compressed data
+ * from the specified input stream.
+ * @param in the InputStream from which to read the compressed data
+ */
+ public FramedSnappyCompressorInputStream(InputStream in) throws IOException {
+ this.in = new PushbackInputStream(in, 1);
+ readStreamIdentifier();
+ }
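+
+ // Illustrative sketch (not part of the class): wrap any InputStream
+ // positioned at the start of a framed stream; "data.sz" is made up.
+ //
+ //   InputStream in = new FramedSnappyCompressorInputStream(
+ //       new FileInputStream("data.sz"));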
+
+ /** {@inheritDoc} */
+ @Override
+ public int read() throws IOException {
+ return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void close() throws IOException {
+ if (currentCompressedChunk != null) {
+ currentCompressedChunk.close();
+ currentCompressedChunk = null;
+ }
+ in.close();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ int read = readOnce(b, off, len);
+ if (read == -1) {
+ readNextBlock();
+ if (endReached) {
+ return -1;
+ }
+ read = readOnce(b, off, len);
+ }
+ return read;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int available() throws IOException {
+ if (inUncompressedChunk) {
+ return Math.min(uncompressedBytesRemaining,
+ in.available());
+ } else if (currentCompressedChunk != null) {
+ return currentCompressedChunk.available();
+ }
+ return 0;
+ }
+
+ /**
+ * Read from the current chunk into the given array.
+ *
+ * @return -1 if there is no current chunk or the number of bytes
+ * read from the current chunk (which may be -1 if the end of the
+ * chunk is reached).
+ */
+ private int readOnce(byte[] b, int off, int len) throws IOException {
+ int read = -1;
+ if (inUncompressedChunk) {
+ int amount = Math.min(uncompressedBytesRemaining, len);
+ if (amount == 0) {
+ return -1;
+ }
+ read = in.read(b, off, amount);
+ if (read != -1) {
+ uncompressedBytesRemaining -= read;
+ count(read);
+ }
+ } else if (currentCompressedChunk != null) {
+ long before = currentCompressedChunk.getBytesRead();
+ read = currentCompressedChunk.read(b, off, len);
+ if (read == -1) {
+ currentCompressedChunk.close();
+ currentCompressedChunk = null;
+ } else {
+ count(currentCompressedChunk.getBytesRead() - before);
+ }
+ }
+ if (read > 0) {
+ checksum.update(b, off, read);
+ }
+ return read;
+ }
+
+ private void readNextBlock() throws IOException {
+ verifyLastChecksumAndReset();
+ inUncompressedChunk = false;
+ int type = readOneByte();
+ if (type == -1) {
+ endReached = true;
+ } else if (type == STREAM_IDENTIFIER_TYPE) {
+ in.unread(type);
+ pushedBackBytes(1);
+ readStreamIdentifier();
+ readNextBlock();
+ } else if (type == PADDING_CHUNK_TYPE
+ || (type > MAX_UNSKIPPABLE_TYPE && type <= MAX_SKIPPABLE_TYPE)) {
+ skipBlock();
+ readNextBlock();
+ } else if (type >= MIN_UNSKIPPABLE_TYPE && type <= MAX_UNSKIPPABLE_TYPE) {
+ throw new IOException("unskippable chunk with type " + type
+ + " (hex " + Integer.toHexString(type) + ")"
+ + " detected.");
+ } else if (type == UNCOMPRESSED_CHUNK_TYPE) {
+ inUncompressedChunk = true;
+ uncompressedBytesRemaining = readSize() - 4 /* CRC */;
+ expectedChecksum = unmask(readCrc());
+ } else if (type == COMPRESSED_CHUNK_TYPE) {
+ long size = readSize() - 4 /* CRC */;
+ expectedChecksum = unmask(readCrc());
+ currentCompressedChunk =
+ new SnappyCompressorInputStream(new BoundedInputStream(in, size));
+ // constructor reads uncompressed size
+ count(currentCompressedChunk.getBytesRead());
+ } else {
+ // impossible as all potential byte values have been covered
+ throw new IOException("unknown chunk type " + type
+ + " detected.");
+ }
+ }
+
+ private long readCrc() throws IOException {
+ byte[] b = new byte[4];
+ int read = IOUtils.readFully(in, b);
+ count(read);
+ if (read != 4) {
+ throw new IOException("premature end of stream");
+ }
+ long crc = 0;
+ for (int i = 0; i < 4; i++) {
+ crc |= (b[i] & 0xFFL) << (8 * i);
+ }
+ return crc;
+ }
+
+ static long unmask(long x) {
+ // ugly, maybe we should just have used ints and deal with the
+ // overflow
+ x -= MASK_OFFSET;
+ x &= 0xffffFFFFL;
+ return ((x >> 17) | (x << 15)) & 0xffffFFFFL;
+ }
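+
+ // Worked example (illustrative): masking rotates the CRC right by
+ // 15 bits and adds MASK_OFFSET, so for crc = 1 the masked value is
+ // ((1 >> 15) | (1 << 17)) + 0xa282ead8 = 0xa284ead8, and
+ // unmask(0xa284ead8L) subtracts the offset and rotates back to 1.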
+
+ private int readSize() throws IOException {
+ int b = 0;
+ int sz = 0;
+ for (int i = 0; i < 3; i++) {
+ b = readOneByte();
+ if (b == -1) {
+ throw new IOException("premature end of stream");
+ }
+ sz |= (b << (i * 8));
+ }
+ return sz;
+ }
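+
+ // Example (illustrative): the three length bytes are little-endian,
+ // so the bytes 0x06 0x00 0x00 yield a chunk size of 6, exactly the
+ // length stored in the stream identifier chunk above.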
+
+ private void skipBlock() throws IOException {
+ int size = readSize();
+ long read = IOUtils.skip(in, size);
+ count(read);
+ if (read != size) {
+ throw new IOException("premature end of stream");
+ }
+ }
+
+ private void readStreamIdentifier() throws IOException {
+ byte[] b = new byte[10];
+ int read = IOUtils.readFully(in, b);
+ count(read);
+ if (10 != read || !matches(b, 10)) {
+ throw new IOException("Not a framed Snappy stream");
+ }
+ }
+
+ private int readOneByte() throws IOException {
+ int b = in.read();
+ if (b != -1) {
+ count(1);
+ return b & 0xFF;
+ }
+ return -1;
+ }
+
+ private void verifyLastChecksumAndReset() throws IOException {
+ if (expectedChecksum >= 0 && expectedChecksum != checksum.getValue()) {
+ throw new IOException("Checksum verification failed");
+ }
+ expectedChecksum = -1;
+ checksum.reset();
+ }
+
+ /**
+ * Checks if the signature matches what is expected for a .sz file.
+ *
+ * .sz files start with a chunk with tag 0xff and content sNaPpY.
+ *
+ * @param signature the bytes to check
+ * @param length the number of bytes to check
+ * @return true if this is a .sz stream, false otherwise
+ */
+ public static boolean matches(byte[] signature, int length) {
+
+ if (length < SZ_SIGNATURE.length) {
+ return false;
+ }
+
+ byte[] shortenedSig = signature;
+ if (signature.length > SZ_SIGNATURE.length) {
+ shortenedSig = new byte[SZ_SIGNATURE.length];
+ System.arraycopy(signature, 0, shortenedSig, 0, SZ_SIGNATURE.length);
+ }
+
+ return Arrays.equals(shortenedSig, SZ_SIGNATURE);
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/PureJavaCrc32C.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/PureJavaCrc32C.java
new file mode 100644
index 000000000..4163e7aac
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/PureJavaCrc32C.java
@@ -0,0 +1,633 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Some portions of this file Copyright (c) 2004-2006 Intel Corporation
+ * and licensed under the BSD license.
+ */
+package org.apache.commons.compress.compressors.snappy;
+
+import java.util.zip.Checksum;
+
+/**
+ * A pure-java implementation of the CRC32 checksum that uses
+ * the CRC32-C polynomial, the same polynomial used by iSCSI
+ * and implemented on many Intel chipsets supporting SSE4.2.
+ *
+ * This file is a copy of the implementation at the Apache Hadoop project.
+ * @see "http://svn.apache.org/repos/asf/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java"
+ * @NotThreadSafe
+ * @since 1.7
+ */
+class PureJavaCrc32C implements Checksum {
+
+ /** the current CRC value, bit-flipped */
+ private int crc;
+
+ /** Create a new PureJavaCrc32C object. */
+ public PureJavaCrc32C() {
+ reset();
+ }
+
+ public long getValue() {
+ long ret = crc;
+ return (~ret) & 0xffffffffL;
+ }
+
+ public void reset() {
+ crc = 0xffffffff;
+ }
+
+ public void update(byte[] b, int off, int len) {
+ int localCrc = crc;
+
+ while(len > 7) {
+ final int c0 =(b[off+0] ^ localCrc) & 0xff;
+ final int c1 =(b[off+1] ^ (localCrc >>>= 8)) & 0xff;
+ final int c2 =(b[off+2] ^ (localCrc >>>= 8)) & 0xff;
+ final int c3 =(b[off+3] ^ (localCrc >>>= 8)) & 0xff;
+ localCrc = (T[T8_7_start + c0] ^ T[T8_6_start + c1])
+ ^ (T[T8_5_start + c2] ^ T[T8_4_start + c3]);
+
+ final int c4 = b[off+4] & 0xff;
+ final int c5 = b[off+5] & 0xff;
+ final int c6 = b[off+6] & 0xff;
+ final int c7 = b[off+7] & 0xff;
+
+ localCrc ^= (T[T8_3_start + c4] ^ T[T8_2_start + c5])
+ ^ (T[T8_1_start + c6] ^ T[T8_0_start + c7]);
+
+ off += 8;
+ len -= 8;
+ }
+
+ /* loop unroll - duff's device style */
+ switch(len) {
+ case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+ default:
+ /* nothing */
+ }
+
+ // Publish crc out to object
+ crc = localCrc;
+ }
+
+ public final void update(int b) {
+ crc = (crc >>> 8) ^ T[T8_0_start + ((crc ^ b) & 0xff)];
+ }
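+
+ // Sanity check (illustrative): for the ASCII bytes of "123456789"
+ // a CRC-32C implementation yields the well-known check value
+ // 0xE3069283.
+ //
+ //   PureJavaCrc32C c = new PureJavaCrc32C();
+ //   byte[] data = "123456789".getBytes(); // ASCII
+ //   c.update(data, 0, data.length);
+ //   // c.getValue() == 0xE3069283L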
+
+ // CRC polynomial tables generated by:
+ // java -cp build/test/classes/:build/classes/ \
+ // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78
+
+ private static final int T8_0_start = 0*256;
+ private static final int T8_1_start = 1*256;
+ private static final int T8_2_start = 2*256;
+ private static final int T8_3_start = 3*256;
+ private static final int T8_4_start = 4*256;
+ private static final int T8_5_start = 5*256;
+ private static final int T8_6_start = 6*256;
+ private static final int T8_7_start = 7*256;
+
+ private static final int[] T = new int[] {
+ /* T8_0 */
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
+ 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
+ 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+ 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
+ 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
+ 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
+ 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
+ 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
+ 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
+ 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
+ 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
+ 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
+ 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
+ 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
+ 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
+ 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
+ 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
+ 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+ 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
+ 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
+ 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
+ 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
+ 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
+ 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
+ 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
+ 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
+ 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
+ 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
+ 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
+ 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+ 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
+ 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
+ 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
+ /* T8_1 */
+ 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899,
+ 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
+ 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21,
+ 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
+ 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918,
+ 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
+ 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0,
+ 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
+ 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B,
+ 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
+ 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823,
+ 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
+ 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A,
+ 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
+ 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2,
+ 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
+ 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D,
+ 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
+ 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25,
+ 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
+ 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C,
+ 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
+ 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4,
+ 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
+ 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F,
+ 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
+ 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27,
+ 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
+ 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E,
+ 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
+ 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6,
+ 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
+ 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260,
+ 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
+ 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8,
+ 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
+ 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1,
+ 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
+ 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059,
+ 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
+ 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162,
+ 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
+ 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA,
+ 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
+ 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3,
+ 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
+ 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B,
+ 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
+ 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464,
+ 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
+ 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC,
+ 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
+ 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5,
+ 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
+ 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D,
+ 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
+ 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766,
+ 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
+ 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE,
+ 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
+ 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7,
+ 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
+ 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F,
+ 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483,
+ /* T8_2 */
+ 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073,
+ 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
+ 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6,
+ 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
+ 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9,
+ 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
+ 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C,
+ 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
+ 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67,
+ 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
+ 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2,
+ 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
+ 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED,
+ 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
+ 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828,
+ 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
+ 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA,
+ 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
+ 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F,
+ 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
+ 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20,
+ 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
+ 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5,
+ 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
+ 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE,
+ 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
+ 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B,
+ 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
+ 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634,
+ 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
+ 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1,
+ 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
+ 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730,
+ 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
+ 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5,
+ 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
+ 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA,
+ 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
+ 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F,
+ 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
+ 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24,
+ 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
+ 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1,
+ 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
+ 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE,
+ 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
+ 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B,
+ 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
+ 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9,
+ 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
+ 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C,
+ 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
+ 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63,
+ 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
+ 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6,
+ 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
+ 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD,
+ 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
+ 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238,
+ 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
+ 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177,
+ 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
+ 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2,
+ 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8,
+ /* T8_3 */
+ 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939,
+ 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
+ 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF,
+ 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
+ 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804,
+ 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
+ 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2,
+ 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
+ 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2,
+ 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
+ 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54,
+ 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
+ 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F,
+ 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
+ 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69,
+ 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
+ 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE,
+ 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
+ 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538,
+ 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
+ 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3,
+ 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
+ 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405,
+ 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
+ 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255,
+ 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
+ 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3,
+ 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
+ 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368,
+ 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
+ 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E,
+ 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
+ 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006,
+ 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
+ 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0,
+ 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
+ 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B,
+ 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
+ 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD,
+ 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
+ 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D,
+ 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
+ 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B,
+ 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
+ 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0,
+ 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
+ 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656,
+ 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
+ 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1,
+ 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
+ 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07,
+ 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
+ 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC,
+ 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
+ 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A,
+ 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
+ 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A,
+ 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
+ 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C,
+ 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
+ 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57,
+ 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
+ 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1,
+ 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842,
+ /* T8_4 */
+ 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4,
+ 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
+ 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65,
+ 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
+ 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127,
+ 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
+ 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6,
+ 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
+ 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3,
+ 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
+ 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32,
+ 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
+ 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470,
+ 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
+ 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1,
+ 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
+ 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A,
+ 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
+ 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB,
+ 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
+ 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89,
+ 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
+ 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018,
+ 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
+ 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D,
+ 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
+ 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C,
+ 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
+ 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE,
+ 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
+ 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F,
+ 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
+ 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8,
+ 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
+ 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39,
+ 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
+ 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B,
+ 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
+ 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA,
+ 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
+ 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF,
+ 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
+ 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E,
+ 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
+ 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C,
+ 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
+ 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD,
+ 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
+ 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06,
+ 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
+ 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497,
+ 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
+ 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5,
+ 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
+ 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544,
+ 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
+ 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51,
+ 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
+ 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0,
+ 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
+ 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82,
+ 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
+ 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013,
+ 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3,
+ /* T8_5 */
+ 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA,
+ 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
+ 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5,
+ 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
+ 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4,
+ 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
+ 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB,
+ 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
+ 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57,
+ 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
+ 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548,
+ 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
+ 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69,
+ 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
+ 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576,
+ 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
+ 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031,
+ 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
+ 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E,
+ 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
+ 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F,
+ 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
+ 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810,
+ 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
+ 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC,
+ 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
+ 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3,
+ 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
+ 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682,
+ 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
+ 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D,
+ 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
+ 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C,
+ 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
+ 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413,
+ 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
+ 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32,
+ 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
+ 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D,
+ 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
+ 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81,
+ 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
+ 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E,
+ 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
+ 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF,
+ 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
+ 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0,
+ 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
+ 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7,
+ 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
+ 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8,
+ 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
+ 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9,
+ 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
+ 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6,
+ 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
+ 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A,
+ 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
+ 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975,
+ 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
+ 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154,
+ 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
+ 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B,
+ 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C,
+ /* T8_6 */
+ 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558,
+ 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
+ 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B,
+ 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
+ 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE,
+ 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
+ 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD,
+ 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
+ 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5,
+ 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
+ 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6,
+ 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
+ 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43,
+ 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
+ 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110,
+ 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
+ 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222,
+ 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
+ 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71,
+ 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
+ 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884,
+ 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
+ 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7,
+ 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
+ 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F,
+ 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
+ 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC,
+ 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
+ 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39,
+ 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
+ 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A,
+ 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
+ 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC,
+ 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
+ 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF,
+ 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
+ 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A,
+ 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
+ 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59,
+ 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
+ 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811,
+ 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
+ 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542,
+ 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
+ 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7,
+ 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
+ 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4,
+ 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
+ 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6,
+ 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
+ 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185,
+ 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
+ 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670,
+ 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
+ 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23,
+ 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
+ 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B,
+ 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
+ 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238,
+ 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
+ 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD,
+ 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
+ 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E,
+ 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F,
+ /* T8_7 */
+ 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769,
+ 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
+ 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3,
+ 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
+ 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD,
+ 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
+ 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07,
+ 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
+ 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0,
+ 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
+ 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A,
+ 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
+ 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44,
+ 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
+ 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E,
+ 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
+ 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B,
+ 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
+ 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881,
+ 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
+ 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF,
+ 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
+ 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135,
+ 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
+ 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2,
+ 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
+ 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18,
+ 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
+ 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076,
+ 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
+ 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC,
+ 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
+ 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D,
+ 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
+ 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7,
+ 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
+ 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9,
+ 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
+ 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63,
+ 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
+ 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494,
+ 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
+ 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E,
+ 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
+ 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20,
+ 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
+ 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA,
+ 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
+ 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F,
+ 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
+ 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5,
+ 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
+ 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B,
+ 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
+ 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751,
+ 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
+ 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6,
+ 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
+ 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C,
+ 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
+ 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612,
+ 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
+ 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8,
+ 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
+ };
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/SnappyCompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/SnappyCompressorInputStream.java
new file mode 100644
index 000000000..46c218868
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/SnappyCompressorInputStream.java
@@ -0,0 +1,423 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.snappy;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.compressors.CompressorInputStream;
+
+/**
+ * CompressorInputStream for the raw Snappy format.
+ *
+ * This implementation uses an internal buffer in order to handle
+ * the back-references that are at the heart of the LZ77 algorithm.
+ * The size of the buffer must be at least as big as the biggest
+ * offset used in the compressed stream. The current version of the
+ * Snappy algorithm as defined by Google works on 32k blocks and
+ * doesn't contain offsets bigger than 32k, which is therefore the
+ * default block size used by this class.
+ *
+ * @see Snappy compressed format description
+ * @since 1.7
+ */
+public class SnappyCompressorInputStream extends CompressorInputStream {
+
+ /** Mask used to determine the type of "tag" being processed */
+ private static final int TAG_MASK = 0x03;
+
+ /** Default block size */
+ public static final int DEFAULT_BLOCK_SIZE = 32768;
+
+ /** Buffer to write decompressed bytes to for back-references */
+ private final byte[] decompressBuf;
+
+ /** Index one past the last byte written to the buffer */
+ private int writeIndex;
+
+ /** Index of the next byte to be read. */
+ private int readIndex;
+
+ /** The actual block size specified */
+ private final int blockSize;
+
+ /** The underlying stream to read compressed data from */
+ private final InputStream in;
+
+ /** The size of the uncompressed data */
+ private final int size;
+
+ /** Number of uncompressed bytes still to be read. */
+ private int uncompressedBytesRemaining;
+
+ // used in no-arg read method
+ private final byte[] oneByte = new byte[1];
+
+ private boolean endReached = false;
+
+ /**
+ * Constructor using the default buffer size of 32k.
+ *
+ * @param is
+ * An InputStream to read compressed data from
+ *
+ * @throws IOException
+ */
+ public SnappyCompressorInputStream(final InputStream is) throws IOException {
+ this(is, DEFAULT_BLOCK_SIZE);
+ }
+
+ /**
+ * Constructor using a configurable buffer size.
+ *
+ * @param is
+ * An InputStream to read compressed data from
+ * @param blockSize
+ * The block size used in compression
+ *
+ * @throws IOException
+ */
+ public SnappyCompressorInputStream(final InputStream is, final int blockSize)
+ throws IOException {
+ this.in = is;
+ this.blockSize = blockSize;
+ this.decompressBuf = new byte[blockSize * 3];
+ this.writeIndex = readIndex = 0;
+ uncompressedBytesRemaining = size = (int) readSize();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int read() throws IOException {
+ return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int available() {
+ return writeIndex - readIndex;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (endReached) {
+ return -1;
+ }
+ final int avail = available();
+ if (len > avail) {
+ fill(len - avail);
+ }
+
+ int readable = Math.min(len, available());
+ System.arraycopy(decompressBuf, readIndex, b, off, readable);
+ readIndex += readable;
+ if (readIndex > blockSize) {
+ slideBuffer();
+ }
+ return readable;
+ }
+
+ /**
+ * Try to fill the buffer with enough bytes to satisfy the current
+ * read request.
+ *
+ * @param len the number of uncompressed bytes to read
+ */
+ private void fill(int len) throws IOException {
+ if (uncompressedBytesRemaining == 0) {
+ endReached = true;
+ }
+ int readNow = Math.min(len, uncompressedBytesRemaining);
+
+ while (readNow > 0) {
+ final int b = readOneByte();
+ int length = 0;
+ long offset = 0;
+
+ switch (b & TAG_MASK) {
+
+ case 0x00:
+
+ length = readLiteralLength(b);
+
+ if (expandLiteral(length)) {
+ return;
+ }
+ break;
+
+ case 0x01:
+
+ /*
+ * These elements can encode lengths between [4..11] bytes and
+ * offsets between [0..2047] bytes. (len-4) occupies three bits
+ * and is stored in bits [2..4] of the tag byte. The offset
+ * occupies 11 bits, of which the upper three are stored in the
+ * upper three bits ([5..7]) of the tag byte, and the lower
+ * eight are stored in a byte following the tag byte.
+ */
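+ // Worked example (editorial note): tag byte 0x6D has type bits 01,
+ // so length = 4 + ((0x6D >> 2) & 0x07) = 7 and the upper offset
+ // bits are (0x6D & 0xE0) << 3 = 0x300; a following byte 0x42
+ // yields offset 0x342.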
+
+ length = 4 + ((b >> 2) & 0x07);
+ offset = (b & 0xE0) << 3;
+ offset |= readOneByte();
+
+ if (expandCopy(offset, length)) {
+ return;
+ }
+ break;
+
+ case 0x02:
+
+ /*
+ * These elements can encode lengths between [1..64] and offsets
+ * from [0..65535]. (len-1) occupies six bits and is stored in
+ * the upper six bits ([2..7]) of the tag byte. The offset is
+ * stored as a little-endian 16-bit integer in the two bytes
+ * following the tag byte.
+ */
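+ // Worked example (editorial note): tag byte 0x0A encodes
+ // length (0x0A >> 2) + 1 = 3; following bytes 0x34 0x12 give the
+ // little-endian offset 0x1234.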
+
+ length = (b >> 2) + 1;
+
+ offset = readOneByte();
+ offset |= readOneByte() << 8;
+
+ if (expandCopy(offset, length)) {
+ return;
+ }
+ break;
+
+ case 0x03:
+
+ /*
+ * These are like the copies with 2-byte offsets (see previous
+ * subsection), except that the offset is stored as a 32-bit
+ * integer instead of a 16-bit integer (and thus will occupy
+ * four bytes).
+ */
+
+ length = (b >> 2) + 1;
+
+ offset = readOneByte();
+ offset |= readOneByte() << 8;
+ offset |= readOneByte() << 16;
+ offset |= ((long) readOneByte()) << 24;
+
+ if (expandCopy(offset, length)) {
+ return;
+ }
+ break;
+ }
+
+ readNow -= length;
+ uncompressedBytesRemaining -= length;
+ }
+ }
+
+ /**
+ * Slide buffer.
+ *
+ * Move all bytes of the buffer after the first block down to
+ * the beginning of the buffer.
+ */
+ private void slideBuffer() {
+ System.arraycopy(decompressBuf, blockSize, decompressBuf, 0,
+ blockSize * 2);
+ writeIndex -= blockSize;
+ readIndex -= blockSize;
+ }
+
+
+ /*
+ * For literals up to and including 60 bytes in length, the
+ * upper six bits of the tag byte contain (len-1). The literal
+ * follows immediately thereafter in the bytestream. For
+ * longer literals, the (len-1) value is stored after the tag
+ * byte, little-endian. The upper six bits of the tag byte
+ * describe how many bytes are used for the length; 60, 61, 62
+ * or 63 for 1-4 bytes, respectively. The literal itself follows
+ * after the length.
+ */
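+ // Worked example (editorial note): tag byte 0x50 gives
+ // 0x50 >> 2 = 20, so the literal is 20 + 1 = 21 bytes and
+ // follows immediately.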
+ private int readLiteralLength(int b) throws IOException {
+ int length;
+ switch (b >> 2) {
+ case 60:
+ length = readOneByte();
+ break;
+ case 61:
+ length = readOneByte();
+ length |= readOneByte() << 8;
+ break;
+ case 62:
+ length = readOneByte();
+ length |= readOneByte() << 8;
+ length |= readOneByte() << 16;
+ break;
+ case 63:
+ length = readOneByte();
+ length |= readOneByte() << 8;
+ length |= readOneByte() << 16;
+ length |= (((long) readOneByte()) << 24);
+ break;
+ default:
+ length = b >> 2;
+ break;
+ }
+
+ return length + 1;
+ }
+
+ /**
+ * Literals are uncompressed data stored directly in the byte stream.
+ *
+ * @param length
+ * The number of bytes to read from the underlying stream
+ *
+ * @throws IOException
+ * If the first byte cannot be read for any reason other than
+ * end of file, or if the input stream has been closed, or if
+ * some other I/O error occurs.
+ * @return True if the decompressed data should be flushed
+ */
+ private boolean expandLiteral(final int length) throws IOException {
+ int bytesRead = in.read(decompressBuf, writeIndex, length);
+ count(bytesRead);
+ if (length != bytesRead) {
+ throw new IOException("Premature end of stream");
+ }
+
+ writeIndex += length;
+ return writeIndex >= 2 * this.blockSize;
+ }
+
+ /**
+ * Copies are references back into previous decompressed data, telling the
+ * decompressor to reuse data it has previously decoded. They encode two
+ * values: The offset, saying how many bytes back from the current position
+ * to read, and the length, how many bytes to copy. Offsets of zero can be
+ * encoded, but are not legal; similarly, it is possible to encode
+ * backreferences that would go past the end of the block (offset > current
+ * decompressed position), which is also nonsensical and thus not allowed.
+ *
+ * @param off
+ * The offset backward from the end of the expanded stream
+ * @param length
+ * The number of bytes to copy
+ *
+ * @throws IOException
+ * If the offset expands past the front of the decompression
+ * buffer
+ * @return True if the decompressed data should be flushed
+ */
+ private boolean expandCopy(final long off, int length) throws IOException {
+ if (off > blockSize) {
+ throw new IOException("Offset is larger than block size");
+ }
+ int offset = (int) off;
+
+ if (offset == 1) {
+ byte lastChar = decompressBuf[writeIndex - 1];
+ for (int i = 0; i < length; i++) {
+ decompressBuf[writeIndex++] = lastChar;
+ }
+ } else if (length < offset) {
+ System.arraycopy(decompressBuf, writeIndex - offset,
+ decompressBuf, writeIndex, length);
+ writeIndex += length;
+ } else {
+ int fullRotations = length / offset;
+ int pad = length - (offset * fullRotations);
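+ // Example (editorial note): length 10 with offset 4 copies the
+ // 4-byte window twice (fullRotations = 2) and then pad = 2 bytes.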
+
+ while (fullRotations-- != 0) {
+ System.arraycopy(decompressBuf, writeIndex - offset,
+ decompressBuf, writeIndex, offset);
+ writeIndex += offset;
+ }
+
+ if (pad > 0) {
+ System.arraycopy(decompressBuf, writeIndex - offset,
+ decompressBuf, writeIndex, pad);
+
+ writeIndex += pad;
+ }
+ }
+ return writeIndex >= 2 * this.blockSize;
+ }
+
+ /**
+ * This helper method reads the next byte of data from the input stream. The
+ * value byte is returned as an int in the range 0
+ * to 255. If no byte is available because the end of the
+ * stream has been reached, an Exception is thrown.
+ *
+ * @return The next byte of data
+ * @throws IOException
+ * EOF is reached or error reading the stream
+ */
+ private int readOneByte() throws IOException {
+ int b = in.read();
+ if (b == -1) {
+ throw new IOException("Premature end of stream");
+ }
+ count(1);
+ return b & 0xFF;
+ }
+
+ /**
+ * The stream starts with the uncompressed length (up to a maximum of 2^32 -
+ * 1), stored as a little-endian varint. Varints consist of a series of
+ * bytes, where the lower 7 bits are data and the upper bit is set iff there
+ * are more bytes to be read. In other words, an uncompressed length of 64
+ * would be stored as 0x40, and an uncompressed length of 2097150 (0x1FFFFE)
+ * would be stored as 0xFE 0xFF 0x7F.
+ *
+ * @return The size of the uncompressed data
+ *
+ * @throws IOException
+ * Could not read a byte
+ */
+ private long readSize() throws IOException {
+ int index = 0;
+ long sz = 0;
+ int b = 0;
+
+ do {
+ b = readOneByte();
+ // widen before shifting so shifts of 28+ bits don't overflow int
+ sz |= ((long) (b & 0x7f)) << (index++ * 7);
+ } while (0 != (b & 0x80));
+ return sz;
+ }
+
+ /**
+ * Get the uncompressed size of the stream
+ *
+ * @return the uncompressed size
+ */
+ public int getSize() {
+ return size;
+ }
+
+}
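+
+// Editorial sketch, not part of the Apache sources above: a minimal usage
+// example, assuming a file "data.snappy" holding a raw (unframed) stream.
+class SnappyReadExample {
+ public static void main(String[] args) throws java.io.IOException {
+ try (SnappyCompressorInputStream in = new SnappyCompressorInputStream(
+ new java.io.FileInputStream("data.snappy"))) {
+ byte[] buf = new byte[8192];
+ int n;
+ while ((n = in.read(buf)) != -1) {
+ System.out.write(buf, 0, n); // decompressed bytes to stdout
+ }
+ System.out.flush();
+ }
+ }
+}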
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/package.html
new file mode 100644
index 000000000..a0d61de05
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/snappy/package.html
@@ -0,0 +1,38 @@
+
+
+
+ Provides stream classes for decompressing streams using the
+ Snappy
+ algorithm.
+
+ The raw Snappy format, which only contains the compressed data,
+ is supported by the SnappyCompressorInputStream
+ class, while the so-called "framing format" is implemented
+ by FramedSnappyCompressorInputStream. Note that there
+ have been different versions of the framing format specification;
+ the implementation in Commons Compress is based on the
+ specification "Last revised: 2013-10-25".
+
+ Only the "framing format" can be auto-detected; this means you
+ have to specify the format explicitly if you want to read a
+ "raw" Snappy stream
+ via CompressorStreamFactory.
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/ZCompressorInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/ZCompressorInputStream.java
new file mode 100644
index 000000000..789448dac
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/ZCompressorInputStream.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.z;
+
+import java.io.IOException;
+import java.io.InputStream;
+import org.apache.commons.compress.compressors.z._internal_.InternalLZWInputStream;
+
+/**
+ * Input stream that decompresses .Z files.
+ * @NotThreadSafe
+ * @since 1.7
+ */
+public class ZCompressorInputStream extends InternalLZWInputStream {
+ private static final int MAGIC_1 = 0x1f;
+ private static final int MAGIC_2 = 0x9d;
+ private static final int BLOCK_MODE_MASK = 0x80;
+ private static final int MAX_CODE_SIZE_MASK = 0x1f;
+ private final boolean blockMode;
+ private final int maxCodeSize;
+ private long totalCodesRead = 0;
+
+ public ZCompressorInputStream(InputStream inputStream) throws IOException {
+ super(inputStream);
+ int firstByte = in.read();
+ int secondByte = in.read();
+ int thirdByte = in.read();
+ if (firstByte != MAGIC_1 || secondByte != MAGIC_2 || thirdByte < 0) {
+ throw new IOException("Input is not in .Z format");
+ }
+ blockMode = (thirdByte & BLOCK_MODE_MASK) != 0;
+ maxCodeSize = thirdByte & MAX_CODE_SIZE_MASK;
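+ // Example (editorial note): a third byte of 0x90 selects block mode
+ // (bit 0x80 set) with a maximum code size of 0x90 & 0x1F = 16 bits.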
+ if (blockMode) {
+ setClearCode(codeSize);
+ }
+ initializeTables(maxCodeSize);
+ clearEntries();
+ }
+
+ private void clearEntries() {
+ tableSize = 1 << 8;
+ if (blockMode) {
+ tableSize++;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * This method is only protected for technical reasons
+ * and is not part of Commons Compress' published API. It may
+ * change or disappear without warning.
+ */
+ @Override
+ protected int readNextCode() throws IOException {
+ int code = super.readNextCode();
+ if (code >= 0) {
+ ++totalCodesRead;
+ }
+ return code;
+ }
+
+ private void reAlignReading() throws IOException {
+ // "compress" works in multiples of 8 symbols, each codeBits bits long.
+ // When codeBits changes, the remaining unused symbols in the current
+ // group of 8 are still written out, in the old codeSize,
+ // as garbage values (usually zeroes) that need to be skipped.
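+ // Example (editorial note): if 13 codes have been read when the
+ // width changes, 8 - (13 % 8) = 3 padding codes are skipped.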
+ long codeReadsToThrowAway = 8 - (totalCodesRead % 8);
+ if (codeReadsToThrowAway == 8) {
+ codeReadsToThrowAway = 0;
+ }
+ for (long i = 0; i < codeReadsToThrowAway; i++) {
+ readNextCode();
+ }
+ bitsCached = 0;
+ bitsCachedSize = 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ * This method is only protected for technical reasons
+ * and is not part of Commons Compress' published API. It may
+ * change or disappear without warning.
+ */
+ @Override
+ protected int addEntry(int previousCode, byte character) throws IOException {
+ final int maxTableSize = 1 << codeSize;
+ int r = addEntry(previousCode, character, maxTableSize);
+ if (tableSize == maxTableSize && codeSize < maxCodeSize) {
+ reAlignReading();
+ codeSize++;
+ }
+ return r;
+ }
+
+ /**
+ * {@inheritDoc}
+ * This method is only protected for technical reasons
+ * and is not part of Commons Compress' published API. It may
+ * change or disappear without warning.
+ */
+ @Override
+ protected int decompressNextSymbol() throws IOException {
+ //
+ // table entry table entry
+ // _____________ _____
+ // table entry / \ / \
+ // ____________/ \ \
+ // / / \ / \ \
+ // +---+---+---+---+---+---+---+---+---+---+
+ // | . | . | . | . | . | . | . | . | . | . |
+ // +---+---+---+---+---+---+---+---+---+---+
+ // |<--------->|<------------->|<----->|<->|
+ // symbol symbol symbol symbol
+ //
+ final int code = readNextCode();
+ if (code < 0) {
+ return -1;
+ } else if (blockMode && code == clearCode) {
+ clearEntries();
+ reAlignReading();
+ codeSize = 9;
+ previousCode = -1;
+ return 0;
+ } else {
+ boolean addedUnfinishedEntry = false;
+ if (code == tableSize) {
+ addRepeatOfPreviousCode();
+ addedUnfinishedEntry = true;
+ } else if (code > tableSize) {
+ throw new IOException(String.format("Invalid %d bit code 0x%x", Integer.valueOf(codeSize), Integer.valueOf(code)));
+ }
+ return expandCodeToOutputStack(code, addedUnfinishedEntry);
+ }
+ }
+
+}
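+
+// Editorial sketch, not part of the Apache sources above: decompressing
+// a hypothetical "archive.Z" file with ZCompressorInputStream.
+class ZReadExample {
+ public static void main(String[] args) throws java.io.IOException {
+ try (ZCompressorInputStream in = new ZCompressorInputStream(
+ new java.io.FileInputStream("archive.Z"))) {
+ byte[] buf = new byte[4096];
+ int n;
+ while ((n = in.read(buf)) != -1) {
+ System.out.write(buf, 0, n); // decompressed bytes to stdout
+ }
+ System.out.flush();
+ }
+ }
+}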
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/_internal_/InternalLZWInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/_internal_/InternalLZWInputStream.java
new file mode 100644
index 000000000..a19e54b42
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/_internal_/InternalLZWInputStream.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.compressors.z._internal_;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.compress.compressors.CompressorInputStream;
+
+/**
+ * This class is only public for technical reasons and is not
+ * part of Commons Compress' published API - it may change or
+ * disappear without warning.
+ *
+ * Base-class for traditional Unix ".Z" compression and the
+ * Unshrinking method of ZIP archive.
+ *
+ * @NotThreadSafe
+ * @since 1.7
+ */
+public abstract class InternalLZWInputStream extends CompressorInputStream {
+ private final byte[] oneByte = new byte[1];
+
+ protected final InputStream in;
+ protected int clearCode = -1;
+ protected int codeSize = 9;
+ protected int bitsCached = 0;
+ protected int bitsCachedSize = 0;
+ protected int previousCode = -1;
+ protected int tableSize = 0;
+ protected int[] prefixes;
+ protected byte[] characters;
+ private byte[] outputStack;
+ private int outputStackLocation;
+
+ protected InternalLZWInputStream(InputStream inputStream) throws IOException {
+ this.in = inputStream;
+ }
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+
+ @Override
+ public int read() throws IOException {
+ int ret = read(oneByte);
+ if (ret < 0) {
+ return ret;
+ }
+ return 0xff & oneByte[0];
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ int bytesRead = readFromStack(b, off, len);
+ while (len - bytesRead > 0) {
+ int result = decompressNextSymbol();
+ if (result < 0) {
+ if (bytesRead > 0) {
+ count(bytesRead);
+ return bytesRead;
+ }
+ return result;
+ }
+ bytesRead += readFromStack(b, off + bytesRead, len - bytesRead);
+ }
+ count(bytesRead);
+ return bytesRead;
+ }
+
+ /**
+ * Read the next code and expand it.
+ */
+ protected abstract int decompressNextSymbol() throws IOException;
+
+ /**
+ * Add a new entry to the dictionary.
+ */
+ protected abstract int addEntry(int previousCode, byte character)
+ throws IOException;
+
+ /**
+ * Sets the clear code based on the code size.
+ */
+ protected void setClearCode(int codeSize) {
+ clearCode = (1 << (codeSize - 1));
+ }
+
+ /**
+ * Initializes the arrays based on the maximum code size.
+ */
+ protected void initializeTables(int maxCodeSize) {
+ final int maxTableSize = 1 << maxCodeSize;
+ prefixes = new int[maxTableSize];
+ characters = new byte[maxTableSize];
+ outputStack = new byte[maxTableSize];
+ outputStackLocation = maxTableSize;
+ final int max = 1 << 8;
+ for (int i = 0; i < max; i++) {
+ prefixes[i] = -1;
+ characters[i] = (byte) i;
+ }
+ }
+
+ /**
+ * Reads the next code from the stream.
+ */
+ protected int readNextCode() throws IOException {
+ while (bitsCachedSize < codeSize) {
+ final int nextByte = in.read();
+ if (nextByte < 0) {
+ return nextByte;
+ }
+ bitsCached |= (nextByte << bitsCachedSize);
+ bitsCachedSize += 8;
+ }
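+ // Example (editorial note): with codeSize = 9 and buffered bytes
+ // 0x34 then 0x12, bitsCached = 0x1234; the returned code is
+ // 0x1234 & 0x1FF = 0x034 and 0x09 stays cached for the next call.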
+ final int mask = (1 << codeSize) - 1;
+ final int code = (bitsCached & mask);
+ bitsCached >>>= codeSize;
+ bitsCachedSize -= codeSize;
+ return code;
+ }
+
+ /**
+ * Adds a new entry if the maximum table size hasn't been exceeded
+ * and returns the new index.
+ */
+ protected int addEntry(int previousCode, byte character, int maxTableSize) {
+ if (tableSize < maxTableSize) {
+ final int index = tableSize;
+ prefixes[tableSize] = previousCode;
+ characters[tableSize] = character;
+ tableSize++;
+ return index;
+ }
+ return -1;
+ }
+
+ /**
+ * Adds an entry for a repeat of previousCode that hasn't been added yet.
+ */
+ protected int addRepeatOfPreviousCode() throws IOException {
+ if (previousCode == -1) {
+ // can't have a repeat for the very first code
+ throw new IOException("The first code can't be a reference to its preceding code");
+ }
+ byte firstCharacter = 0;
+ for (int last = previousCode; last >= 0; last = prefixes[last]) {
+ firstCharacter = characters[last];
+ }
+ return addEntry(previousCode, firstCharacter);
+ }
+
+ /**
+ * Expands the entry with index code to the output stack and may
+ * create a new entry.
+ */
+ protected int expandCodeToOutputStack(int code, boolean addedUnfinishedEntry)
+ throws IOException {
+ for (int entry = code; entry >= 0; entry = prefixes[entry]) {
+ outputStack[--outputStackLocation] = characters[entry];
+ }
+ if (previousCode != -1 && !addedUnfinishedEntry) {
+ addEntry(previousCode, outputStack[outputStackLocation]);
+ }
+ previousCode = code;
+ return outputStackLocation;
+ }
+
+ private int readFromStack(byte[] b, int off, int len) {
+ int remainingInStack = outputStack.length - outputStackLocation;
+ if (remainingInStack > 0) {
+ int maxLength = Math.min(remainingInStack, len);
+ System.arraycopy(outputStack, outputStackLocation, b, off, maxLength);
+ outputStackLocation += maxLength;
+ return maxLength;
+ }
+ return 0;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/_internal_/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/_internal_/package.html
new file mode 100644
index 000000000..b0f1525ec
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/_internal_/package.html
@@ -0,0 +1,25 @@
+
+
+
+ This package is not part of Commons Compress' published
+ API. It may change without warning. Contains classes
+ used by Commons Compress internally.
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/package.html
new file mode 100644
index 000000000..ca9924b78
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/compressors/z/package.html
@@ -0,0 +1,24 @@
+
+
+
+ Provides stream classes for decompressing
+ streams using the "compress" algorithm used to write .Z files.
+
+
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/ArchiveUtils.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/ArchiveUtils.java
new file mode 100644
index 000000000..16beed2e0
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/ArchiveUtils.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.utils;
+
+import java.io.UnsupportedEncodingException;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+
+/**
+ * Generic Archive utilities
+ */
+public class ArchiveUtils {
+
+ /** Private constructor to prevent instantiation of this utility class. */
+ private ArchiveUtils(){
+ }
+
+ /**
+ * Generates a string containing the name, isDirectory setting and size of an entry.
+ *
+ * For example:
+ *
+ * - 2000 main.c
+ * d 100 testfiles
+ *
+ *
+ * @return the representation of the entry
+ */
+ public static String toString(ArchiveEntry entry){
+ StringBuilder sb = new StringBuilder();
+ sb.append(entry.isDirectory()? 'd' : '-');// c.f. "ls -l" output
+ String size = Long.toString(entry.getSize());
+ sb.append(' ');
+ // Pad output to 7 places, leading spaces
+ for(int i=7; i > size.length(); i--){
+ sb.append(' ');
+ }
+ sb.append(size);
+ sb.append(' ').append(entry.getName());
+ return sb.toString();
+ }
+
+ /**
+ * Check if buffer contents matches Ascii String.
+ *
+ * @param expected
+ * @param buffer
+ * @param offset
+ * @param length
+ * @return {@code true} if buffer is the same as the expected string
+ */
+ public static boolean matchAsciiBuffer(
+ String expected, byte[] buffer, int offset, int length){
+ byte[] buffer1;
+ try {
+ buffer1 = expected.getBytes(CharsetNames.US_ASCII);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e); // Should not happen
+ }
+ return isEqual(buffer1, 0, buffer1.length, buffer, offset, length, false);
+ }
+
+ /**
+ * Check if buffer contents matches Ascii String.
+ *
+ * @param expected
+ * @param buffer
+ * @return {@code true} if buffer is the same as the expected string
+ */
+ public static boolean matchAsciiBuffer(String expected, byte[] buffer){
+ return matchAsciiBuffer(expected, buffer, 0, buffer.length);
+ }
+
+ /**
+ * Convert a string to Ascii bytes.
+ * Used for comparing "magic" strings which need to be independent of the default Locale.
+ *
+ * @param inputString
+ * @return the bytes
+ */
+ public static byte[] toAsciiBytes(String inputString){
+ try {
+ return inputString.getBytes(CharsetNames.US_ASCII);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e); // Should never happen
+ }
+ }
+
+ /**
+ * Convert an input byte array to a String using the ASCII character set.
+ *
+ * @param inputBytes
+ * @return the bytes, interpreted as an Ascii string
+ */
+ public static String toAsciiString(final byte[] inputBytes){
+ try {
+ return new String(inputBytes, CharsetNames.US_ASCII);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e); // Should never happen
+ }
+ }
+
+ /**
+ * Convert an input byte array to a String using the ASCII character set.
+ *
+ * @param inputBytes input byte array
+ * @param offset offset within array
+ * @param length length of array
+ * @return the bytes, interpreted as an Ascii string
+ */
+ public static String toAsciiString(final byte[] inputBytes, int offset, int length){
+ try {
+ return new String(inputBytes, offset, length, CharsetNames.US_ASCII);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e); // Should never happen
+ }
+ }
+
+ /**
+ * Compare byte buffers, optionally ignoring trailing nulls
+ *
+ * @param buffer1
+ * @param offset1
+ * @param length1
+ * @param buffer2
+ * @param offset2
+ * @param length2
+ * @param ignoreTrailingNulls
+ * @return {@code true} if buffer1 and buffer2 have same contents, having regard to trailing nulls
+ */
+ public static boolean isEqual(
+ final byte[] buffer1, final int offset1, final int length1,
+ final byte[] buffer2, final int offset2, final int length2,
+ boolean ignoreTrailingNulls){
+ int minLen=length1 < length2 ? length1 : length2;
+ for (int i=0; i < minLen; i++){
+ if (buffer1[offset1+i] != buffer2[offset2+i]){
+ return false;
+ }
+ }
+ if (length1 == length2){
+ return true;
+ }
+ if (ignoreTrailingNulls){
+ if (length1 > length2){
+ for(int i = length2; i < length1; i++){
+ if (buffer1[offset1+i] != 0){
+ return false;
+ }
+ }
+ } else {
+ for(int i = length1; i < length2; i++){
+ if (buffer2[offset2+i] != 0){
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Compare byte buffers
+ *
+ * @param buffer1
+ * @param offset1
+ * @param length1
+ * @param buffer2
+ * @param offset2
+ * @param length2
+ * @return {@code true} if buffer1 and buffer2 have same contents
+ */
+ public static boolean isEqual(
+ final byte[] buffer1, final int offset1, final int length1,
+ final byte[] buffer2, final int offset2, final int length2){
+ return isEqual(buffer1, offset1, length1, buffer2, offset2, length2, false);
+ }
+
+ /**
+ * Compare byte buffers
+ *
+ * @param buffer1
+ * @param buffer2
+ * @return {@code true} if buffer1 and buffer2 have same contents
+ */
+ public static boolean isEqual(final byte[] buffer1, final byte[] buffer2 ){
+ return isEqual(buffer1, 0, buffer1.length, buffer2, 0, buffer2.length, false);
+ }
+
+ /**
+ * Compare byte buffers, optionally ignoring trailing nulls
+ *
+ * @param buffer1
+ * @param buffer2
+ * @param ignoreTrailingNulls
+ * @return {@code true} if buffer1 and buffer2 have same contents
+ */
+ public static boolean isEqual(final byte[] buffer1, final byte[] buffer2, boolean ignoreTrailingNulls){
+ return isEqual(buffer1, 0, buffer1.length, buffer2, 0, buffer2.length, ignoreTrailingNulls);
+ }
+
+ /**
+ * Compare byte buffers, ignoring trailing nulls
+ *
+ * @param buffer1
+ * @param offset1
+ * @param length1
+ * @param buffer2
+ * @param offset2
+ * @param length2
+ * @return {@code true} if buffer1 and buffer2 have same contents, having regard to trailing nulls
+ */
+ public static boolean isEqualWithNull(
+ final byte[] buffer1, final int offset1, final int length1,
+ final byte[] buffer2, final int offset2, final int length2){
+ return isEqual(buffer1, offset1, length1, buffer2, offset2, length2, true);
+ }
+
+ /**
+ * Returns true if the first N bytes of an array are all zero
+ *
+ * @param a
+ * The array to check
+ * @param size
+ * The number of characters to check (not the size of the array)
+ * @return true if the first N bytes are zero
+ */
+ public static boolean isArrayZero(byte[] a, int size) {
+ for (int i = 0; i < size; i++) {
+ if (a[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
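+
+// Editorial sketch, not part of the Apache sources above: comparing a
+// null-padded buffer against its unpadded counterpart.
+class ArchiveUtilsExample {
+ public static void main(String[] args) {
+ byte[] padded = { 'a', 'b', 0, 0 };
+ byte[] exact = { 'a', 'b' };
+ // prints true: trailing nulls in the longer buffer are ignored
+ System.out.println(ArchiveUtils.isEqualWithNull(
+ padded, 0, padded.length, exact, 0, exact.length));
+ }
+}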
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/BoundedInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/BoundedInputStream.java
new file mode 100644
index 000000000..791f08cf7
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/BoundedInputStream.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.utils;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A stream that limits reading from a wrapped stream to a given number of bytes.
+ * @NotThreadSafe
+ * @since 1.6
+ */
+public class BoundedInputStream extends InputStream {
+ private final InputStream in;
+ private long bytesRemaining;
+
+ /**
+ * Creates the stream that will at most read the given amount of
+ * bytes from the given stream.
+ * @param in the stream to read from
+ * @param size the maximum amount of bytes to read
+ */
+ public BoundedInputStream(final InputStream in, final long size) {
+ this.in = in;
+ bytesRemaining = size;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (bytesRemaining > 0) {
+ --bytesRemaining;
+ return in.read();
+ } else {
+ return -1;
+ }
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (bytesRemaining == 0) {
+ return -1;
+ }
+ int bytesToRead = len;
+ if (bytesToRead > bytesRemaining) {
+ bytesToRead = (int) bytesRemaining;
+ }
+ final int bytesRead = in.read(b, off, bytesToRead);
+ if (bytesRead >= 0) {
+ bytesRemaining -= bytesRead;
+ }
+ return bytesRead;
+ }
+
+ @Override
+ public void close() {
+ // there isn't anything to close in this stream and the nested
+ // stream is controlled externally
+ }
+}
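+
+// Editorial sketch, not part of the Apache sources above: wrapping a
+// stream so that at most the first 16 bytes can be consumed.
+class BoundedInputStreamExample {
+ public static void main(String[] args) throws java.io.IOException {
+ java.io.InputStream bounded = new BoundedInputStream(
+ new java.io.ByteArrayInputStream(new byte[64]), 16);
+ int total = 0;
+ while (bounded.read() != -1) {
+ total++;
+ }
+ System.out.println(total); // prints 16
+ }
+}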
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/CRC32VerifyingInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CRC32VerifyingInputStream.java
new file mode 100644
index 000000000..0dc5b9882
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CRC32VerifyingInputStream.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.utils;
+
+import java.io.InputStream;
+import java.util.zip.CRC32;
+
+/**
+ * A stream that verifies the CRC of the data read once the stream is
+ * exhausted.
+ * @NotThreadSafe
+ * @since 1.6
+ */
+public class CRC32VerifyingInputStream extends ChecksumVerifyingInputStream {
+
+ public CRC32VerifyingInputStream(final InputStream in, final long size, final int expectedCrc32) {
+ this(in, size, expectedCrc32 & 0xFFFFFFFFL);
+ }
+
+ /**
+ * @since 1.7
+ */
+ public CRC32VerifyingInputStream(final InputStream in, final long size, final long expectedCrc32) {
+ super(new CRC32(), in, size, expectedCrc32);
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/CharsetNames.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CharsetNames.java
new file mode 100644
index 000000000..6acaeb299
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CharsetNames.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.utils;
+
+/**
+ * Character encoding names required of every implementation of the Java platform.
+ *
+ * From the Java documentation Standard
+ * charsets:
+ *
+ * Every implementation of the Java platform is required to support the following character encodings. Consult the
+ * release documentation for your implementation to see if any other encodings are supported.
+ *
+ *
+ *
+ * US-ASCII
+ * - Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the Unicode character set.
+ * ISO-8859-1
+ * - ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.
+ * UTF-8
+ * - Eight-bit Unicode Transformation Format.
+ * UTF-16BE
+ * - Sixteen-bit Unicode Transformation Format, big-endian byte order.
+ * UTF-16LE
+ * - Sixteen-bit Unicode Transformation Format, little-endian byte order.
+ * UTF-16
+ * - Sixteen-bit Unicode Transformation Format, byte order specified by a mandatory initial byte-order mark (either order
+ * accepted on input, big-endian used on output.)
+ *
+ *
+ * This perhaps would best belong in the [lang] project. Even if a similar interface is defined in [lang], it is not
+ * foreseen that [compress] would be made to depend on [lang].
+ *
+ * @see Standard charsets
+ * @since 1.4
+ * @version $Id: CharsetNames.java 1552970 2013-12-22 07:03:43Z bodewig $
+ */
+public class CharsetNames {
+ /**
+ * ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final String ISO_8859_1 = "ISO-8859-1";
+
+ /**
+ *
+ * Seven-bit ASCII, also known as ISO646-US, also known as the Basic Latin block of the Unicode character set.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final String US_ASCII = "US-ASCII";
+
+ /**
+ *
+ * Sixteen-bit Unicode Transformation Format, The byte order specified by a mandatory initial byte-order mark
+ * (either order accepted on input, big-endian used on output)
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final String UTF_16 = "UTF-16";
+
+ /**
+ *
+ * Sixteen-bit Unicode Transformation Format, big-endian byte order.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final String UTF_16BE = "UTF-16BE";
+
+ /**
+ *
+ * Sixteen-bit Unicode Transformation Format, little-endian byte order.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final String UTF_16LE = "UTF-16LE";
+
+ /**
+ *
+ * Eight-bit Unicode Transformation Format.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final String UTF_8 = "UTF-8";
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/Charsets.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/Charsets.java
new file mode 100644
index 000000000..fb5ded363
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/Charsets.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.commons.compress.utils;
+
+import java.nio.charset.Charset;
+
+/**
+ * Charsets required of every implementation of the Java platform.
+ *
+ * From the Java documentation Standard
+ * charsets:
+ *
+ * Every implementation of the Java platform is required to support the following character encodings. Consult the
+ * release documentation for your implementation to see if any other encodings are supported.
+ *
+ *
+ *
+ * US-ASCII
+ * - Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the Unicode character set.
+ * ISO-8859-1
+ * - ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.
+ * UTF-8
+ * - Eight-bit Unicode Transformation Format.
+ * UTF-16BE
+ * - Sixteen-bit Unicode Transformation Format, big-endian byte order.
+ * UTF-16LE
+ * - Sixteen-bit Unicode Transformation Format, little-endian byte order.
+ * UTF-16
+ * - Sixteen-bit Unicode Transformation Format, byte order specified by a mandatory initial byte-order mark (either order
+ * accepted on input, big-endian used on output.)
+ *
+ *
+ * This class best belongs in the Commons Lang or IO project. Even if a similar class is defined in another Commons
+ * component, it is not foreseen that Commons Compress would be made to depend on another Commons component.
+ *
+ * @see Standard charsets
+ * @since 1.4
+ * @version $Id: Charsets.java 1552970 2013-12-22 07:03:43Z bodewig $
+ */
+public class Charsets {
+
+ //
+ // This class should only contain Charset instances for required encodings. This guarantees that it will load correctly and
+ // without delay on all Java platforms.
+ //
+
+ /**
+ * Returns the given Charset or the default Charset if the given Charset is null.
+ *
+ * @param charset
+ * A charset or null.
+ * @return the given Charset or the default Charset if the given Charset is null
+ */
+ public static Charset toCharset(Charset charset) {
+ return charset == null ? Charset.defaultCharset() : charset;
+ }
+
+ /**
+ * Returns a Charset for the named charset. If the name is null, return the default Charset.
+ *
+ * @param charset
+ * The name of the requested charset, may be null.
+ * @return a Charset for the named charset
+ * @throws java.nio.charset.UnsupportedCharsetException
+ * If the named charset is unavailable
+ * @throws java.nio.charset.IllegalCharsetNameException
+ * If the given charset name is illegal
+ */
+ public static Charset toCharset(String charset) {
+ return charset == null ? Charset.defaultCharset() : Charset.forName(charset);
+ }
+
+ /**
+ * ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final Charset ISO_8859_1 = Charset.forName(CharsetNames.ISO_8859_1);
+
+ /**
+ *
+ * Seven-bit ASCII, also known as ISO646-US, also known as the Basic Latin block of the Unicode character set.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final Charset US_ASCII = Charset.forName(CharsetNames.US_ASCII);
+
+ /**
+ *
+ * Sixteen-bit Unicode Transformation Format, The byte order specified by a mandatory initial byte-order mark
+ * (either order accepted on input, big-endian used on output)
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final Charset UTF_16 = Charset.forName(CharsetNames.UTF_16);
+
+ /**
+ *
+ * Sixteen-bit Unicode Transformation Format, big-endian byte order.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final Charset UTF_16BE = Charset.forName(CharsetNames.UTF_16BE);
+
+ /**
+ *
+ * Sixteen-bit Unicode Transformation Format, little-endian byte order.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final Charset UTF_16LE = Charset.forName(CharsetNames.UTF_16LE);
+
+ /**
+ *
+ * Eight-bit Unicode Transformation Format.
+ *
+ *
+ * Every implementation of the Java platform is required to support this character encoding.
+ *
+ *
+ * @see Standard charsets
+ */
+ public static final Charset UTF_8 = Charset.forName(CharsetNames.UTF_8);
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/ChecksumVerifyingInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/ChecksumVerifyingInputStream.java
new file mode 100644
index 000000000..69bf03c16
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/ChecksumVerifyingInputStream.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.commons.compress.utils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.Checksum;
+
+/**
+ * A stream that verifies the checksum of the data read once the stream is
+ * exhausted.
+ * @NotThreadSafe
+ * @since 1.7
+ */
+public class ChecksumVerifyingInputStream extends InputStream {
+ private final InputStream in;
+ private long bytesRemaining;
+ private final long expectedChecksum;
+ private final Checksum checksum;
+
+ public ChecksumVerifyingInputStream(final Checksum checksum, final InputStream in,
+ final long size, final long expectedChecksum) {
+ this.checksum = checksum;
+ this.in = in;
+ this.expectedChecksum = expectedChecksum;
+ this.bytesRemaining = size;
+ }
+
+ /**
+ * Reads a single byte from the stream
+ * @throws IOException if the underlying stream throws or the
+ * stream is exhausted and the Checksum doesn't match the expected
+ * value
+ */
+ @Override
+ public int read() throws IOException {
+ if (bytesRemaining <= 0) {
+ return -1;
+ }
+ int ret = in.read();
+ if (ret >= 0) {
+ checksum.update(ret);
+ --bytesRemaining;
+ }
+ if (bytesRemaining == 0 && expectedChecksum != checksum.getValue()) {
+ throw new IOException("Checksum verification failed");
+ }
+ return ret;
+ }
+
+ /**
+ * Reads a byte array from the stream
+ * @throws IOException if the underlying stream throws or the
+ * stream is exhausted and the Checksum doesn't match the expected
+ * value
+ */
+ @Override
+ public int read(byte[] b) throws IOException {
+ return read(b, 0, b.length);
+ }
+
+ /**
+ * Reads from the stream into a byte array.
+ * @throws IOException if the underlying stream throws or the
+ * stream is exhausted and the Checksum doesn't match the expected
+ * value
+ */
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ int ret = in.read(b, off, len);
+ if (ret >= 0) {
+ checksum.update(b, off, ret);
+ bytesRemaining -= ret;
+ }
+ if (bytesRemaining <= 0 && expectedChecksum != checksum.getValue()) {
+ throw new IOException("Checksum verification failed");
+ }
+ return ret;
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ // Can't really skip; every byte has to be read and checksummed for verification
+ if (read() >= 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+}
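+
+// Editorial sketch, not part of the Apache sources above: verifying a
+// 3-byte payload against its own CRC32 while reading it back.
+class ChecksumVerifyExample {
+ public static void main(String[] args) throws java.io.IOException {
+ byte[] payload = { 1, 2, 3 };
+ java.util.zip.CRC32 crc = new java.util.zip.CRC32();
+ crc.update(payload, 0, payload.length);
+ java.io.InputStream in = new CRC32VerifyingInputStream(
+ new java.io.ByteArrayInputStream(payload), payload.length, crc.getValue());
+ while (in.read() != -1) {
+ // the checksum comparison fires as the last byte is consumed
+ }
+ }
+}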
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/CountingInputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CountingInputStream.java
new file mode 100644
index 000000000..ab26d2d61
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CountingInputStream.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.utils;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Stream that tracks the number of bytes read.
+ * @since 1.3
+ * @NotThreadSafe
+ */
+public class CountingInputStream extends FilterInputStream {
+ private long bytesRead;
+
+ public CountingInputStream(final InputStream in) {
+ super(in);
+ }
+
+ @Override
+ public int read() throws IOException {
+ int r = in.read();
+ if (r >= 0) {
+ count(1);
+ }
+ return r;
+ }
+ @Override
+ public int read(byte[] b) throws IOException {
+ return read(b, 0, b.length);
+ }
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ int r = in.read(b, off, len);
+ if (r >= 0) {
+ count(r);
+ }
+ return r;
+ }
+ /**
+ * Increments the counter of already read bytes.
+ * Doesn't increment if the EOF has been hit (read == -1)
+ *
+ * @param read the number of bytes read
+ */
+ protected final void count(long read) {
+ if (read != -1) {
+ bytesRead += read;
+ }
+ }
+
+ /**
+ * Returns the current number of bytes read from this stream.
+ * @return the number of read bytes
+ */
+ public long getBytesRead() {
+ return bytesRead;
+ }
+}
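+
+// Editorial sketch, not part of the Apache sources above: counting the
+// bytes a consumer actually pulled from a stream.
+class CountingInputStreamExample {
+ public static void main(String[] args) throws java.io.IOException {
+ CountingInputStream in = new CountingInputStream(
+ new java.io.ByteArrayInputStream(new byte[32]));
+ in.read(new byte[10]);
+ System.out.println(in.getBytesRead()); // prints 10
+ }
+}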
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/CountingOutputStream.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CountingOutputStream.java
new file mode 100644
index 000000000..3e62fdec0
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/CountingOutputStream.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.utils;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * Stream that tracks the number of bytes written.
+ * @since 1.3
+ * @NotThreadSafe
+ */
+public class CountingOutputStream extends FilterOutputStream {
+ private long bytesWritten = 0;
+
+ public CountingOutputStream(final OutputStream out) {
+ super(out);
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ out.write(b);
+ count(1);
+ }
+ @Override
+ public void write(byte[] b) throws IOException {
+ write(b, 0, b.length);
+ }
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ out.write(b, off, len);
+ count(len);
+ }
+
+ /**
+ * Increments the counter of already written bytes.
+ * Doesn't increment if written is -1.
+ *
+ * @param written the number of bytes written
+ */
+ protected void count(long written) {
+ if (written != -1) {
+ bytesWritten += written;
+ }
+ }
+
+ /**
+ * Returns the current number of bytes written to this stream.
+ * @return the number of written bytes
+ */
+ public long getBytesWritten() {
+ return bytesWritten;
+ }
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/IOUtils.java b/Tools/Cache Editor/src/org/apache/commons/compress/utils/IOUtils.java
new file mode 100644
index 000000000..8ce49d293
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/IOUtils.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.commons.compress.utils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Utility functions.
+ * @Immutable
+ */
+public final class IOUtils {
+
+ /** Private constructor to prevent instantiation of this utility class. */
+ private IOUtils() {
+ }
+
+ /**
+ * Copies the content of an InputStream into an OutputStream.
+ * Uses a default buffer size of 8024 bytes.
+ *
+ * @param input
+ * the InputStream to copy
+ * @param output
+ * the target Stream
+ * @return the number of bytes copied
+ * @throws IOException
+ * if an error occurs
+ */
+ public static long copy(final InputStream input, final OutputStream output) throws IOException {
+ return copy(input, output, 8024);
+ }
+
+ /**
+ * Copies the content of an InputStream into an OutputStream.
+ *
+ * @param input
+ * the InputStream to copy
+ * @param output
+ * the target Stream
+ * @param buffersize
+ * the buffer size to use
+ * @return the number of bytes copied
+ * @throws IOException
+ * if an error occurs
+ */
+ public static long copy(final InputStream input, final OutputStream output, int buffersize) throws IOException {
+ final byte[] buffer = new byte[buffersize];
+ int n = 0;
+ long count = 0;
+ while (-1 != (n = input.read(buffer))) {
+ output.write(buffer, 0, n);
+ count += n;
+ }
+ return count;
+ }
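+
+ /*
+ * Usage sketch (hypothetical file name): stream one source into a sink and
+ * capture the byte count.
+ *
+ * InputStream in = new FileInputStream("main_file_cache.dat");
+ * ByteArrayOutputStream out = new ByteArrayOutputStream();
+ * long copied = IOUtils.copy(in, out); // default 8024-byte buffer
+ * IOUtils.closeQuietly(in);
+ */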
+
+ /**
+ * Skips the given number of bytes by repeatedly invoking skip on
+ * the given input stream if necessary.
+ *
+ * This method will only skip less than the requested number of
+ * bytes if the end of the input stream has been reached.
+ *
+ * @param input stream to skip bytes in
+ * @param numToSkip the number of bytes to skip
+ * @return the number of bytes actually skipped
+ * @throws IOException if an I/O error occurs
+ */
+ public static long skip(InputStream input, long numToSkip) throws IOException {
+ long available = numToSkip;
+ while (numToSkip > 0) {
+ long skipped = input.skip(numToSkip);
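+ // InputStream.skip() may return 0 without reaching EOF; rather than
+ // spin, treat a zero return as "cannot advance" and stop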
+ if (skipped == 0) {
+ break;
+ }
+ numToSkip -= skipped;
+ }
+ return available - numToSkip;
+ }
+
+ /**
+ * Reads as much from input as possible to fill the given array.
+ *
+ * This method may invoke read repeatedly to fill the array and
+ * will only read fewer bytes than the length of the array if the
+ * end of the stream has been reached.
+ *
+ * @param input stream to read from
+ * @param b buffer to fill
+ * @return the number of bytes actually read
+ * @throws IOException
+ */
+ public static int readFully(InputStream input, byte[] b) throws IOException {
+ return readFully(input, b, 0, b.length);
+ }
+
+ /**
+ * Reads as much from input as possible to fill the given array
+ * with the given amount of bytes.
+ *
+ * This method may invoke read repeatedly to read the bytes and
+ * will only read fewer bytes than the requested length if the
+ * end of the stream has been reached.
+ *
+ * @param input stream to read from
+ * @param b buffer to fill
+ * @param offset offset into the buffer to start filling at
+ * @param len the number of bytes to read
+ * @return the number of bytes actually read
+ * @throws IOException
+ * if an I/O error has occurred
+ */
+ public static int readFully(InputStream input, byte[] b, int offset, int len)
+ throws IOException {
+ if (len < 0 || offset < 0 || len + offset > b.length) {
+ throw new IndexOutOfBoundsException();
+ }
+ int count = 0, x = 0;
+ while (count != len) {
+ x = input.read(b, offset + count, len - count);
+ if (x == -1) {
+ break;
+ }
+ count += x;
+ }
+ return count;
+ }
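+
+ /*
+ * Usage sketch: read an exact-size header and detect truncation.
+ *
+ * byte[] header = new byte[8];
+ * if (IOUtils.readFully(in, header) != header.length) {
+ *     throw new EOFException("truncated header");
+ * }
+ */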
+
+ // toByteArray(InputStream) copied from:
+ // commons/proper/io/trunk/src/main/java/org/apache/commons/io/IOUtils.java?revision=1428941
+ // January 8th, 2013
+ //
+ // Assuming our copy() works just as well as theirs! :-)
+
+ /**
+ * Gets the contents of an InputStream as a byte[].
+ *
+ * This method buffers the input internally, so there is no need to use a
+ * BufferedInputStream.
+ *
+ * @param input the InputStream to read from
+ * @return the requested byte array
+ * @throws NullPointerException if the input is null
+ * @throws IOException if an I/O error occurs
+ * @since 1.5
+ */
+ public static byte[] toByteArray(final InputStream input) throws IOException {
+ final ByteArrayOutputStream output = new ByteArrayOutputStream();
+ copy(input, output);
+ return output.toByteArray();
+ }
+
+ /**
+ * Closes the given Closeable and swallows any IOException that may occur.
+ * @param c Closeable to close, can be null
+ * @since 1.7
+ */
+ public static void closeQuietly(Closeable c) {
+ if (c != null) {
+ try {
+ c.close();
+ } catch (IOException ignored) { // NOPMD
+ }
+ }
+ }
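+
+ /*
+ * Typical pattern (sketch): call closeQuietly() from a finally block so a
+ * failing close() cannot mask the primary exception.
+ *
+ * InputStream in = null;
+ * try {
+ *     in = new FileInputStream("main_file_cache.dat"); // hypothetical path
+ *     byte[] all = IOUtils.toByteArray(in);
+ * } finally {
+ *     IOUtils.closeQuietly(in);
+ * }
+ */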
+}
diff --git a/Tools/Cache Editor/src/org/apache/commons/compress/utils/package.html b/Tools/Cache Editor/src/org/apache/commons/compress/utils/package.html
new file mode 100644
index 000000000..0409d1267
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/commons/compress/utils/package.html
@@ -0,0 +1,23 @@
+<html>
+ <body>
+ Contains utilities used internally by the compress library.
+ </body>
+</html>
diff --git a/Tools/Cache Editor/src/org/apache/tools/bzip2/BZip2Constants.java b/Tools/Cache Editor/src/org/apache/tools/bzip2/BZip2Constants.java
new file mode 100644
index 000000000..4f832d67d
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/tools/bzip2/BZip2Constants.java
@@ -0,0 +1,136 @@
+/*
+ * The Apache Software License, Version 1.1
+ *
+ * Copyright (c) 2001 The Apache Software Foundation. All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. The end-user documentation included with the redistribution, if
+ * any, must include the following acknowlegement:
+ * "This product includes software developed by the
+ * Apache Software Foundation (http://www.apache.org/)."
+ * Alternately, this acknowlegement may appear in the software itself,
+ * if and wherever such third-party acknowlegements normally appear.
+ *
+ * 4. The names "Ant" and "Apache Software
+ * Foundation" must not be used to endorse or promote products derived
+ * from this software without prior written permission. For written
+ * permission, please contact apache@apache.org.
+ *
+ * 5. Products derived from this software may not be called "Apache"
+ * nor may "Apache" appear in their names without prior written
+ * permission of the Apache Group.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation. For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * <keiron@aftexsw.com> to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+package org.apache.tools.bzip2;
+
+/**
+ * Constants shared by both the compress and decompress classes.
+ * Holds common arrays and static data.
+ *
+ * @author Keiron Liddle
+ */
+public interface BZip2Constants {
+
+ int baseBlockSize = 100000;
+ int MAX_ALPHA_SIZE = 258;
+ int MAX_CODE_LEN = 23;
+ int RUNA = 0;
+ int RUNB = 1;
+ int N_GROUPS = 6;
+ int G_SIZE = 50;
+ int N_ITERS = 4;
+ int MAX_SELECTORS = (2 + (900000 / G_SIZE));
+ int NUM_OVERSHOOT_BYTES = 20;
+
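+ /*
+ * Pseudo-random numbers consumed (modulo 512) by the block randomisation
+ * pass in CBZip2OutputStream.randomiseBlock() when a block proves too
+ * repetitive to sort within the work limit.
+ */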
+ int[] rNums = {
+ 619, 720, 127, 481, 931, 816, 813, 233, 566, 247,
+ 985, 724, 205, 454, 863, 491, 741, 242, 949, 214,
+ 733, 859, 335, 708, 621, 574, 73, 654, 730, 472,
+ 419, 436, 278, 496, 867, 210, 399, 680, 480, 51,
+ 878, 465, 811, 169, 869, 675, 611, 697, 867, 561,
+ 862, 687, 507, 283, 482, 129, 807, 591, 733, 623,
+ 150, 238, 59, 379, 684, 877, 625, 169, 643, 105,
+ 170, 607, 520, 932, 727, 476, 693, 425, 174, 647,
+ 73, 122, 335, 530, 442, 853, 695, 249, 445, 515,
+ 909, 545, 703, 919, 874, 474, 882, 500, 594, 612,
+ 641, 801, 220, 162, 819, 984, 589, 513, 495, 799,
+ 161, 604, 958, 533, 221, 400, 386, 867, 600, 782,
+ 382, 596, 414, 171, 516, 375, 682, 485, 911, 276,
+ 98, 553, 163, 354, 666, 933, 424, 341, 533, 870,
+ 227, 730, 475, 186, 263, 647, 537, 686, 600, 224,
+ 469, 68, 770, 919, 190, 373, 294, 822, 808, 206,
+ 184, 943, 795, 384, 383, 461, 404, 758, 839, 887,
+ 715, 67, 618, 276, 204, 918, 873, 777, 604, 560,
+ 951, 160, 578, 722, 79, 804, 96, 409, 713, 940,
+ 652, 934, 970, 447, 318, 353, 859, 672, 112, 785,
+ 645, 863, 803, 350, 139, 93, 354, 99, 820, 908,
+ 609, 772, 154, 274, 580, 184, 79, 626, 630, 742,
+ 653, 282, 762, 623, 680, 81, 927, 626, 789, 125,
+ 411, 521, 938, 300, 821, 78, 343, 175, 128, 250,
+ 170, 774, 972, 275, 999, 639, 495, 78, 352, 126,
+ 857, 956, 358, 619, 580, 124, 737, 594, 701, 612,
+ 669, 112, 134, 694, 363, 992, 809, 743, 168, 974,
+ 944, 375, 748, 52, 600, 747, 642, 182, 862, 81,
+ 344, 805, 988, 739, 511, 655, 814, 334, 249, 515,
+ 897, 955, 664, 981, 649, 113, 974, 459, 893, 228,
+ 433, 837, 553, 268, 926, 240, 102, 654, 459, 51,
+ 686, 754, 806, 760, 493, 403, 415, 394, 687, 700,
+ 946, 670, 656, 610, 738, 392, 760, 799, 887, 653,
+ 978, 321, 576, 617, 626, 502, 894, 679, 243, 440,
+ 680, 879, 194, 572, 640, 724, 926, 56, 204, 700,
+ 707, 151, 457, 449, 797, 195, 791, 558, 945, 679,
+ 297, 59, 87, 824, 713, 663, 412, 693, 342, 606,
+ 134, 108, 571, 364, 631, 212, 174, 643, 304, 329,
+ 343, 97, 430, 751, 497, 314, 983, 374, 822, 928,
+ 140, 206, 73, 263, 980, 736, 876, 478, 430, 305,
+ 170, 514, 364, 692, 829, 82, 855, 953, 676, 246,
+ 369, 970, 294, 750, 807, 827, 150, 790, 288, 923,
+ 804, 378, 215, 828, 592, 281, 565, 555, 710, 82,
+ 896, 831, 547, 261, 524, 462, 293, 465, 502, 56,
+ 661, 821, 976, 991, 658, 869, 905, 758, 745, 193,
+ 768, 550, 608, 933, 378, 286, 215, 979, 792, 961,
+ 61, 688, 793, 644, 986, 403, 106, 366, 905, 644,
+ 372, 567, 466, 434, 645, 210, 389, 550, 919, 135,
+ 780, 773, 635, 389, 707, 100, 626, 958, 165, 504,
+ 920, 176, 193, 713, 857, 265, 203, 50, 668, 108,
+ 645, 990, 626, 197, 510, 357, 358, 850, 858, 364,
+ 936, 638
+ };
+}
diff --git a/Tools/Cache Editor/src/org/apache/tools/bzip2/CBZip2OutputStream.java b/Tools/Cache Editor/src/org/apache/tools/bzip2/CBZip2OutputStream.java
new file mode 100644
index 000000000..5bdbe7eae
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/tools/bzip2/CBZip2OutputStream.java
@@ -0,0 +1,1674 @@
+/*
+ * The Apache Software License, Version 1.1
+ *
+ * Copyright (c) 2001-2003 The Apache Software Foundation. All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. The end-user documentation included with the redistribution, if
+ * any, must include the following acknowlegement:
+ * "This product includes software developed by the
+ * Apache Software Foundation (http://www.apache.org/)."
+ * Alternately, this acknowlegement may appear in the software itself,
+ * if and wherever such third-party acknowlegements normally appear.
+ *
+ * 4. The names "Ant" and "Apache Software
+ * Foundation" must not be used to endorse or promote products derived
+ * from this software without prior written permission. For written
+ * permission, please contact apache@apache.org.
+ *
+ * 5. Products derived from this software may not be called "Apache"
+ * nor may "Apache" appear in their names without prior written
+ * permission of the Apache Group.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation. For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * <keiron@aftexsw.com> to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+package org.apache.tools.bzip2;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * An output stream that compresses data in the BZip2 format (without the
+ * file header chars) and writes it to another stream.
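+ *
+ * <p>A minimal usage sketch; the caller writes the "BZ" header chars itself,
+ * since this stream deliberately omits them (hypothetical file name):</p>
+ * <pre>{@code
+ * OutputStream file = new FileOutputStream("out.bz2");
+ * file.write('B'); file.write('Z');
+ * CBZip2OutputStream bz = new CBZip2OutputStream(file, 9);
+ * bz.write(data); // any byte[]
+ * bz.close();
+ * }</pre>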
+ *
+ * @author Keiron Liddle
+ *
+ * TODO: Update to BZip2 1.0.1
+ */
+public class CBZip2OutputStream extends OutputStream implements BZip2Constants {
+ protected static final int SETMASK = (1 << 21);
+ protected static final int CLEARMASK = (~SETMASK);
+ protected static final int GREATER_ICOST = 15;
+ protected static final int LESSER_ICOST = 0;
+ protected static final int SMALL_THRESH = 20;
+ protected static final int DEPTH_THRESH = 10;
+
+ /*
+ If you are ever unlucky/improbable enough
+ to get a stack overflow whilst sorting,
+ increase the following constant and try
+ again. In practice I have never seen the
+ stack go above 27 elems, so the following
+ limit seems very generous.
+ */
+ protected static final int QSORT_STACK_SIZE = 1000;
+
+ private static void panic() {
+ System.out.println("panic");
+ //throw new CError();
+ }
+
+ private void makeMaps() {
+ int i;
+ nInUse = 0;
+ for (i = 0; i < 256; i++) {
+ if (inUse[i]) {
+ seqToUnseq[nInUse] = (char) i;
+ unseqToSeq[i] = (char) nInUse;
+ nInUse++;
+ }
+ }
+ }
+
+ protected static void hbMakeCodeLengths(char[] len, int[] freq,
+ int alphaSize, int maxLen) {
+ /*
+ Nodes and heap entries run from 1. Entry 0
+ for both the heap and nodes is a sentinel.
+ */
+ int nNodes, nHeap, n1, n2, i, j, k;
+ boolean tooLong;
+
+ int[] heap = new int[MAX_ALPHA_SIZE + 2];
+ int[] weight = new int[MAX_ALPHA_SIZE * 2];
+ int[] parent = new int[MAX_ALPHA_SIZE * 2];
+
+ for (i = 0; i < alphaSize; i++) {
+ weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
+ }
+
+ while (true) {
+ nNodes = alphaSize;
+ nHeap = 0;
+
+ heap[0] = 0;
+ weight[0] = 0;
+ parent[0] = -2;
+
+ for (i = 1; i <= alphaSize; i++) {
+ parent[i] = -1;
+ nHeap++;
+ heap[nHeap] = i;
+ {
+ int zz, tmp;
+ zz = nHeap;
+ tmp = heap[zz];
+ while (weight[tmp] < weight[heap[zz >> 1]]) {
+ heap[zz] = heap[zz >> 1];
+ zz >>= 1;
+ }
+ heap[zz] = tmp;
+ }
+ }
+ if (!(nHeap < (MAX_ALPHA_SIZE + 2))) {
+ panic();
+ }
+
+ while (nHeap > 1) {
+ n1 = heap[1];
+ heap[1] = heap[nHeap];
+ nHeap--;
+ {
+ int zz = 0, yy = 0, tmp = 0;
+ zz = 1;
+ tmp = heap[zz];
+ while (true) {
+ yy = zz << 1;
+ if (yy > nHeap) {
+ break;
+ }
+ if (yy < nHeap
+ && weight[heap[yy + 1]] < weight[heap[yy]]) {
+ yy++;
+ }
+ if (weight[tmp] < weight[heap[yy]]) {
+ break;
+ }
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+ heap[zz] = tmp;
+ }
+ n2 = heap[1];
+ heap[1] = heap[nHeap];
+ nHeap--;
+ {
+ int zz = 0, yy = 0, tmp = 0;
+ zz = 1;
+ tmp = heap[zz];
+ while (true) {
+ yy = zz << 1;
+ if (yy > nHeap) {
+ break;
+ }
+ if (yy < nHeap
+ && weight[heap[yy + 1]] < weight[heap[yy]]) {
+ yy++;
+ }
+ if (weight[tmp] < weight[heap[yy]]) {
+ break;
+ }
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+ heap[zz] = tmp;
+ }
+ nNodes++;
+ parent[n1] = parent[n2] = nNodes;
+
+ weight[nNodes] = ((weight[n1] & 0xffffff00)
+ + (weight[n2] & 0xffffff00))
+ | (1 + (((weight[n1] & 0x000000ff) >
+ (weight[n2] & 0x000000ff)) ?
+ (weight[n1] & 0x000000ff) :
+ (weight[n2] & 0x000000ff)));
+
+ parent[nNodes] = -1;
+ nHeap++;
+ heap[nHeap] = nNodes;
+ {
+ int zz = 0, tmp = 0;
+ zz = nHeap;
+ tmp = heap[zz];
+ while (weight[tmp] < weight[heap[zz >> 1]]) {
+ heap[zz] = heap[zz >> 1];
+ zz >>= 1;
+ }
+ heap[zz] = tmp;
+ }
+ }
+ if (!(nNodes < (MAX_ALPHA_SIZE * 2))) {
+ panic();
+ }
+
+ tooLong = false;
+ for (i = 1; i <= alphaSize; i++) {
+ j = 0;
+ k = i;
+ while (parent[k] >= 0) {
+ k = parent[k];
+ j++;
+ }
+ len[i - 1] = (char) j;
+ if (j > maxLen) {
+ tooLong = true;
+ }
+ }
+
+ if (!tooLong) {
+ break;
+ }
+
+ for (i = 1; i < alphaSize; i++) {
+ j = weight[i] >> 8;
+ j = 1 + (j / 2);
+ weight[i] = j << 8;
+ }
+ }
+ }
+
+ /*
+ index of the last char in the block, so
+ the block size == last + 1.
+ */
+ int last;
+
+ /*
+ index in zptr[] of original string after sorting.
+ */
+ int origPtr;
+
+ /*
+ always: in the range 0 .. 9.
+ The current block size is 100000 * this number.
+ */
+ int blockSize100k;
+
+ boolean blockRandomised;
+
+ int bytesOut;
+ int bsBuff;
+ int bsLive;
+ CRC mCrc = new CRC();
+
+ private boolean[] inUse = new boolean[256];
+ private int nInUse;
+
+ private char[] seqToUnseq = new char[256];
+ private char[] unseqToSeq = new char[256];
+
+ private char[] selector = new char[MAX_SELECTORS];
+ private char[] selectorMtf = new char[MAX_SELECTORS];
+
+ private char[] block;
+ private int[] quadrant;
+ private int[] zptr;
+ private short[] szptr;
+ private int[] ftab;
+
+ private int nMTF;
+
+ private int[] mtfFreq = new int[MAX_ALPHA_SIZE];
+
+ /*
+ * Used when sorting. If too many long comparisons
+ * happen, we stop sorting, randomise the block
+ * slightly, and try again.
+ */
+ private int workFactor;
+ private int workDone;
+ private int workLimit;
+ private boolean firstAttempt;
+ @SuppressWarnings("unused")
+ private int nBlocksRandomised;
+
+ private int currentChar = -1;
+ private int runLength = 0;
+
+ public CBZip2OutputStream(OutputStream inStream) throws IOException {
+ this(inStream, 9);
+ }
+
+ public CBZip2OutputStream(OutputStream inStream, int inBlockSize)
+ throws IOException {
+ block = null;
+ quadrant = null;
+ zptr = null;
+ ftab = null;
+
+ bsSetStream(inStream);
+
+ workFactor = 50;
+ if (inBlockSize > 9) {
+ inBlockSize = 9;
+ }
+ if (inBlockSize < 1) {
+ inBlockSize = 1;
+ }
+ blockSize100k = inBlockSize;
+ allocateCompressStructures();
+ initialize();
+ initBlock();
+ }
+
+ /**
+ *
+ * Writes a single byte. Runs of identical bytes are collapsed
+ * (run-length encoded) before entering the current block.
+ *
+ * modified by Oliver Merkel, 010128
+ */
+ public void write(int bv) throws IOException {
+ int b = (256 + bv) % 256;
+ if (currentChar != -1) {
+ if (currentChar == b) {
+ runLength++;
+ if (runLength > 254) {
+ writeRun();
+ currentChar = -1;
+ runLength = 0;
+ }
+ } else {
+ writeRun();
+ runLength = 1;
+ currentChar = b;
+ }
+ } else {
+ currentChar = b;
+ runLength++;
+ }
+ }
+
+ private void writeRun() throws IOException {
+ if (last < allowableBlockSize) {
+ inUse[currentChar] = true;
+ for (int i = 0; i < runLength; i++) {
+ mCrc.updateCRC((char) currentChar);
+ }
+ switch (runLength) {
+ case 1:
+ last++;
+ block[last + 1] = (char) currentChar;
+ break;
+ case 2:
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) currentChar;
+ break;
+ case 3:
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) currentChar;
+ break;
+ default:
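+ // runs of 4 or more bytes are stored as four literal copies plus a
+ // count byte (runLength - 4), which itself must be marked in-use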
+ inUse[runLength - 4] = true;
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) currentChar;
+ last++;
+ block[last + 1] = (char) (runLength - 4);
+ break;
+ }
+ } else {
+ endBlock();
+ initBlock();
+ writeRun();
+ }
+ }
+
+ boolean closed = false;
+
+ protected void finalize() throws Throwable {
+ close();
+ super.finalize();
+ }
+
+ public void close() throws IOException {
+ if (closed) {
+ return;
+ }
+
+ if (runLength > 0) {
+ writeRun();
+ }
+ currentChar = -1;
+ endBlock();
+ endCompression();
+ closed = true;
+ super.close();
+ bsStream.close();
+ }
+
+ public void flush() throws IOException {
+ super.flush();
+ bsStream.flush();
+ }
+
+ private int blockCRC, combinedCRC;
+
+ private void initialize() throws IOException {
+ bytesOut = 0;
+ nBlocksRandomised = 0;
+
+ /* Write `magic' bytes h indicating file-format == huffmanised,
+ followed by a digit indicating blockSize100k.
+ */
+ bsPutUChar('h');
+ bsPutUChar('0' + blockSize100k);
+
+ combinedCRC = 0;
+ }
+
+ private int allowableBlockSize;
+
+ private void initBlock() {
+ // blockNo++;
+ mCrc.initialiseCRC();
+ last = -1;
+ // ch = 0;
+
+ for (int i = 0; i < 256; i++) {
+ inUse[i] = false;
+ }
+
+ /* 20 is just a paranoia constant */
+ allowableBlockSize = baseBlockSize * blockSize100k - 20;
+ }
+
+ private void endBlock() throws IOException {
+ blockCRC = mCrc.getFinalCRC();
+ combinedCRC = (combinedCRC << 1) | (combinedCRC >>> 31);
+ combinedCRC ^= blockCRC;
+
+ /* sort the block and establish posn of original string */
+ doReversibleTransformation();
+
+ /*
+ A 6-byte block header, the value chosen arbitrarily
+ as 0x314159265359 :-). A 32 bit value does not really
+ give a strong enough guarantee that the value will not
+ appear by chance in the compressed datastream. Worst-case
+ probability of this event, for a 900k block, is about
+ 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48 bits.
+ For a compressed file of size 100Gb -- about 100000 blocks --
+ only a 48-bit marker will do. NB: normal compression/
+ decompression do *not* rely on these statistical properties.
+ They are only important when trying to recover blocks from
+ damaged files.
+ */
+ bsPutUChar(0x31);
+ bsPutUChar(0x41);
+ bsPutUChar(0x59);
+ bsPutUChar(0x26);
+ bsPutUChar(0x53);
+ bsPutUChar(0x59);
+
+ /* Now the block's CRC, so it is in a known place. */
+ bsPutint(blockCRC);
+
+ /* Now a single bit indicating randomisation. */
+ if (blockRandomised) {
+ bsW(1, 1);
+ nBlocksRandomised++;
+ } else {
+ bsW(1, 0);
+ }
+
+ /* Finally, block's contents proper. */
+ moveToFrontCodeAndSend();
+ }
+
+ private void endCompression() throws IOException {
+ /*
+ Now another magic 48-bit number, 0x177245385090, to
+ indicate the end of the last block. (sqrt(pi), if
+ you want to know. I did want to use e, but it contains
+ too much repetition -- 27 18 28 18 28 46 -- for me
+ to feel statistically comfortable. Call me paranoid.)
+ */
+ bsPutUChar(0x17);
+ bsPutUChar(0x72);
+ bsPutUChar(0x45);
+ bsPutUChar(0x38);
+ bsPutUChar(0x50);
+ bsPutUChar(0x90);
+
+ bsPutint(combinedCRC);
+
+ bsFinishedWithStream();
+ }
+
+ private void hbAssignCodes (int[] code, char[] length, int minLen,
+ int maxLen, int alphaSize) {
+ int n, vec, i;
+
+ vec = 0;
+ for (n = minLen; n <= maxLen; n++) {
+ for (i = 0; i < alphaSize; i++) {
+ if (length[i] == n) {
+ code[i] = vec;
+ vec++;
+ }
+ }
+ vec <<= 1;
+ }
+ }
+
+ private void bsSetStream(OutputStream f) {
+ bsStream = f;
+ bsLive = 0;
+ bsBuff = 0;
+ bytesOut = 0;
+ }
+
+ private void bsFinishedWithStream() throws IOException {
+ while (bsLive > 0) {
+ int ch = (bsBuff >> 24);
+ bsStream.write(ch); // write 8-bit
+ bsBuff <<= 8;
+ bsLive -= 8;
+ bytesOut++;
+ }
+ }
+
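+ // bsW appends the low n bits of v to the 32-bit bit buffer bsBuff
+ // (aligned at the top), flushing completed bytes to the stream first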
+ private void bsW(int n, int v) throws IOException {
+ while (bsLive >= 8) {
+ int ch = (bsBuff >> 24);
+ bsStream.write(ch); // write 8-bit
+ bsBuff <<= 8;
+ bsLive -= 8;
+ bytesOut++;
+ }
+ bsBuff |= (v << (32 - bsLive - n));
+ bsLive += n;
+ }
+
+ private void bsPutUChar(int c) throws IOException {
+ bsW(8, c);
+ }
+
+ private void bsPutint(int u) throws IOException {
+ bsW(8, (u >> 24) & 0xff);
+ bsW(8, (u >> 16) & 0xff);
+ bsW(8, (u >> 8) & 0xff);
+ bsW(8, u & 0xff);
+ }
+
+ private void bsPutIntVS(int numBits, int c) throws IOException {
+ bsW(numBits, c);
+ }
+
+ private void sendMTFValues() throws IOException {
+ char len[][] = new char[N_GROUPS][MAX_ALPHA_SIZE];
+
+ @SuppressWarnings("unused")
+ int v, t, i, j, gs, ge, totc, bt, bc, iter;
+ int nSelectors = 0, alphaSize, minLen, maxLen, selCtr;
+ @SuppressWarnings("unused")
+ int nGroups, nBytes;
+
+ alphaSize = nInUse + 2;
+ for (t = 0; t < N_GROUPS; t++) {
+ for (v = 0; v < alphaSize; v++) {
+ len[t][v] = (char) GREATER_ICOST;
+ }
+ }
+
+ /* Decide how many coding tables to use */
+ if (nMTF <= 0) {
+ panic();
+ }
+
+ if (nMTF < 200) {
+ nGroups = 2;
+ } else if (nMTF < 600) {
+ nGroups = 3;
+ } else if (nMTF < 1200) {
+ nGroups = 4;
+ } else if (nMTF < 2400) {
+ nGroups = 5;
+ } else {
+ nGroups = 6;
+ }
+
+ /* Generate an initial set of coding tables */ {
+ int nPart, remF, tFreq, aFreq;
+
+ nPart = nGroups;
+ remF = nMTF;
+ gs = 0;
+ while (nPart > 0) {
+ tFreq = remF / nPart;
+ ge = gs - 1;
+ aFreq = 0;
+ while (aFreq < tFreq && ge < alphaSize - 1) {
+ ge++;
+ aFreq += mtfFreq[ge];
+ }
+
+ if (ge > gs && nPart != nGroups && nPart != 1
+ && ((nGroups - nPart) % 2 == 1)) {
+ aFreq -= mtfFreq[ge];
+ ge--;
+ }
+
+ for (v = 0; v < alphaSize; v++) {
+ if (v >= gs && v <= ge) {
+ len[nPart - 1][v] = (char) LESSER_ICOST;
+ } else {
+ len[nPart - 1][v] = (char) GREATER_ICOST;
+ }
+ }
+
+ nPart--;
+ gs = ge + 1;
+ remF -= aFreq;
+ }
+ }
+
+ int[][] rfreq = new int[N_GROUPS][MAX_ALPHA_SIZE];
+ int[] fave = new int[N_GROUPS];
+ short[] cost = new short[N_GROUPS];
+ /*
+ Iterate up to N_ITERS times to improve the tables.
+ */
+ for (iter = 0; iter < N_ITERS; iter++) {
+ for (t = 0; t < nGroups; t++) {
+ fave[t] = 0;
+ }
+
+ for (t = 0; t < nGroups; t++) {
+ for (v = 0; v < alphaSize; v++) {
+ rfreq[t][v] = 0;
+ }
+ }
+
+ nSelectors = 0;
+ totc = 0;
+ gs = 0;
+ while (true) {
+
+ /* Set group start & end marks. */
+ if (gs >= nMTF) {
+ break;
+ }
+ ge = gs + G_SIZE - 1;
+ if (ge >= nMTF) {
+ ge = nMTF - 1;
+ }
+
+ /*
+ Calculate the cost of this group as coded
+ by each of the coding tables.
+ */
+ for (t = 0; t < nGroups; t++) {
+ cost[t] = 0;
+ }
+
+ if (nGroups == 6) {
+ short cost0, cost1, cost2, cost3, cost4, cost5;
+ cost0 = cost1 = cost2 = cost3 = cost4 = cost5 = 0;
+ for (i = gs; i <= ge; i++) {
+ short icv = szptr[i];
+ cost0 += len[0][icv];
+ cost1 += len[1][icv];
+ cost2 += len[2][icv];
+ cost3 += len[3][icv];
+ cost4 += len[4][icv];
+ cost5 += len[5][icv];
+ }
+ cost[0] = cost0;
+ cost[1] = cost1;
+ cost[2] = cost2;
+ cost[3] = cost3;
+ cost[4] = cost4;
+ cost[5] = cost5;
+ } else {
+ for (i = gs; i <= ge; i++) {
+ short icv = szptr[i];
+ for (t = 0; t < nGroups; t++) {
+ cost[t] += len[t][icv];
+ }
+ }
+ }
+
+ /*
+ Find the coding table which is best for this group,
+ and record its identity in the selector table.
+ */
+ bc = 999999999;
+ bt = -1;
+ for (t = 0; t < nGroups; t++) {
+ if (cost[t] < bc) {
+ bc = cost[t];
+ bt = t;
+ }
+ }
+ totc += bc;
+ fave[bt]++;
+ selector[nSelectors] = (char) bt;
+ nSelectors++;
+
+ /*
+ Increment the symbol frequencies for the selected table.
+ */
+ for (i = gs; i <= ge; i++) {
+ rfreq[bt][szptr[i]]++;
+ }
+
+ gs = ge + 1;
+ }
+
+ /*
+ Recompute the tables based on the accumulated frequencies.
+ */
+ for (t = 0; t < nGroups; t++) {
+ hbMakeCodeLengths(len[t], rfreq[t], alphaSize, 20);
+ }
+ }
+
+ rfreq = null;
+ fave = null;
+ cost = null;
+
+ if (!(nGroups < 8)) {
+ panic();
+ }
+ if (!(nSelectors < 32768 && nSelectors <= (2 + (900000 / G_SIZE)))) {
+ panic();
+ }
+
+
+ /* Compute MTF values for the selectors. */
+ {
+ char[] pos = new char[N_GROUPS];
+ char ll_i, tmp2, tmp;
+ for (i = 0; i < nGroups; i++) {
+ pos[i] = (char) i;
+ }
+ for (i = 0; i < nSelectors; i++) {
+ ll_i = selector[i];
+ j = 0;
+ tmp = pos[j];
+ while (ll_i != tmp) {
+ j++;
+ tmp2 = tmp;
+ tmp = pos[j];
+ pos[j] = tmp2;
+ }
+ pos[0] = tmp;
+ selectorMtf[i] = (char) j;
+ }
+ }
+
+ int[][] code = new int[N_GROUPS][MAX_ALPHA_SIZE];
+
+ /* Assign actual codes for the tables. */
+ for (t = 0; t < nGroups; t++) {
+ minLen = 32;
+ maxLen = 0;
+ for (i = 0; i < alphaSize; i++) {
+ if (len[t][i] > maxLen) {
+ maxLen = len[t][i];
+ }
+ if (len[t][i] < minLen) {
+ minLen = len[t][i];
+ }
+ }
+ if (maxLen > 20) {
+ panic();
+ }
+ if (minLen < 1) {
+ panic();
+ }
+ hbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize);
+ }
+
+ /* Transmit the mapping table. */
+ {
+ boolean[] inUse16 = new boolean[16];
+ for (i = 0; i < 16; i++) {
+ inUse16[i] = false;
+ for (j = 0; j < 16; j++) {
+ if (inUse[i * 16 + j]) {
+ inUse16[i] = true;
+ }
+ }
+ }
+
+ nBytes = bytesOut;
+ for (i = 0; i < 16; i++) {
+ if (inUse16[i]) {
+ bsW(1, 1);
+ } else {
+ bsW(1, 0);
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (inUse16[i]) {
+ for (j = 0; j < 16; j++) {
+ if (inUse[i * 16 + j]) {
+ bsW(1, 1);
+ } else {
+ bsW(1, 0);
+ }
+ }
+ }
+ }
+
+ }
+
+ /* Now the selectors. */
+ nBytes = bytesOut;
+ bsW (3, nGroups);
+ bsW (15, nSelectors);
+ for (i = 0; i < nSelectors; i++) {
+ for (j = 0; j < selectorMtf[i]; j++) {
+ bsW(1, 1);
+ }
+ bsW(1, 0);
+ }
+
+ /* Now the coding tables. */
+ nBytes = bytesOut;
+
+ for (t = 0; t < nGroups; t++) {
+ int curr = len[t][0];
+ bsW(5, curr);
+ for (i = 0; i < alphaSize; i++) {
+ while (curr < len[t][i]) {
+ bsW(2, 2);
+ curr++; /* 10 */
+ }
+ while (curr > len[t][i]) {
+ bsW(2, 3);
+ curr--; /* 11 */
+ }
+ bsW (1, 0);
+ }
+ }
+
+ /* And finally, the block data proper */
+ nBytes = bytesOut;
+ selCtr = 0;
+ gs = 0;
+ while (true) {
+ if (gs >= nMTF) {
+ break;
+ }
+ ge = gs + G_SIZE - 1;
+ if (ge >= nMTF) {
+ ge = nMTF - 1;
+ }
+ for (i = gs; i <= ge; i++) {
+ bsW(len[selector[selCtr]][szptr[i]],
+ code[selector[selCtr]][szptr[i]]);
+ }
+
+ gs = ge + 1;
+ selCtr++;
+ }
+ if (!(selCtr == nSelectors)) {
+ panic();
+ }
+ }
+
+ private void moveToFrontCodeAndSend () throws IOException {
+ bsPutIntVS(24, origPtr);
+ generateMTFValues();
+ sendMTFValues();
+ }
+
+ private OutputStream bsStream;
+
+ private void simpleSort(int lo, int hi, int d) {
+ int i, j, h, bigN, hp;
+ int v;
+
+ bigN = hi - lo + 1;
+ if (bigN < 2) {
+ return;
+ }
+
+ hp = 0;
+ while (incs[hp] < bigN) {
+ hp++;
+ }
+ hp--;
+
+ for (; hp >= 0; hp--) {
+ h = incs[hp];
+
+ i = lo + h;
+ while (true) {
+ /* copy 1 */
+ if (i > hi) {
+ break;
+ }
+ v = zptr[i];
+ j = i;
+ while (fullGtU(zptr[j - h] + d, v + d)) {
+ zptr[j] = zptr[j - h];
+ j = j - h;
+ if (j <= (lo + h - 1)) {
+ break;
+ }
+ }
+ zptr[j] = v;
+ i++;
+
+ /* copy 2 */
+ if (i > hi) {
+ break;
+ }
+ v = zptr[i];
+ j = i;
+ while (fullGtU(zptr[j - h] + d, v + d)) {
+ zptr[j] = zptr[j - h];
+ j = j - h;
+ if (j <= (lo + h - 1)) {
+ break;
+ }
+ }
+ zptr[j] = v;
+ i++;
+
+ /* copy 3 */
+ if (i > hi) {
+ break;
+ }
+ v = zptr[i];
+ j = i;
+ while (fullGtU(zptr[j - h] + d, v + d)) {
+ zptr[j] = zptr[j - h];
+ j = j - h;
+ if (j <= (lo + h - 1)) {
+ break;
+ }
+ }
+ zptr[j] = v;
+ i++;
+
+ if (workDone > workLimit && firstAttempt) {
+ return;
+ }
+ }
+ }
+ }
+
+ private void vswap(int p1, int p2, int n) {
+ int temp = 0;
+ while (n > 0) {
+ temp = zptr[p1];
+ zptr[p1] = zptr[p2];
+ zptr[p2] = temp;
+ p1++;
+ p2++;
+ n--;
+ }
+ }
+
+ private char med3(char a, char b, char c) {
+ char t;
+ if (a > b) {
+ t = a;
+ a = b;
+ b = t;
+ }
+ if (b > c) {
+ t = b;
+ b = c;
+ c = t;
+ }
+ if (a > b) {
+ b = a;
+ }
+ return b;
+ }
+
+ private static class StackElem {
+ int ll;
+ int hh;
+ int dd;
+ }
+
+ private void qSort3(int loSt, int hiSt, int dSt) {
+ int unLo, unHi, ltLo, gtHi, med, n, m;
+ int sp, lo, hi, d;
+ StackElem[] stack = new StackElem[QSORT_STACK_SIZE];
+ for (int count = 0; count < QSORT_STACK_SIZE; count++) {
+ stack[count] = new StackElem();
+ }
+
+ sp = 0;
+
+ stack[sp].ll = loSt;
+ stack[sp].hh = hiSt;
+ stack[sp].dd = dSt;
+ sp++;
+
+ while (sp > 0) {
+ if (sp >= QSORT_STACK_SIZE) {
+ panic();
+ }
+
+ sp--;
+ lo = stack[sp].ll;
+ hi = stack[sp].hh;
+ d = stack[sp].dd;
+
+ if (hi - lo < SMALL_THRESH || d > DEPTH_THRESH) {
+ simpleSort(lo, hi, d);
+ if (workDone > workLimit && firstAttempt) {
+ return;
+ }
+ continue;
+ }
+
+ med = med3(block[zptr[lo] + d + 1],
+ block[zptr[hi ] + d + 1],
+ block[zptr[(lo + hi) >> 1] + d + 1]);
+
+ unLo = ltLo = lo;
+ unHi = gtHi = hi;
+
+ while (true) {
+ while (true) {
+ if (unLo > unHi) {
+ break;
+ }
+ n = ((int) block[zptr[unLo] + d + 1]) - med;
+ if (n == 0) {
+ int temp = 0;
+ temp = zptr[unLo];
+ zptr[unLo] = zptr[ltLo];
+ zptr[ltLo] = temp;
+ ltLo++;
+ unLo++;
+ continue;
+ }
+ if (n > 0) {
+ break;
+ }
+ unLo++;
+ }
+ while (true) {
+ if (unLo > unHi) {
+ break;
+ }
+ n = ((int) block[zptr[unHi] + d + 1]) - med;
+ if (n == 0) {
+ int temp = 0;
+ temp = zptr[unHi];
+ zptr[unHi] = zptr[gtHi];
+ zptr[gtHi] = temp;
+ gtHi--;
+ unHi--;
+ continue;
+ }
+ if (n < 0) {
+ break;
+ }
+ unHi--;
+ }
+ if (unLo > unHi) {
+ break;
+ }
+ int temp = 0;
+ temp = zptr[unLo];
+ zptr[unLo] = zptr[unHi];
+ zptr[unHi] = temp;
+ unLo++;
+ unHi--;
+ }
+
+ if (gtHi < ltLo) {
+ stack[sp].ll = lo;
+ stack[sp].hh = hi;
+ stack[sp].dd = d + 1;
+ sp++;
+ continue;
+ }
+
+ n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo) : (unLo - ltLo);
+ vswap(lo, unLo - n, n);
+ m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi) : (gtHi - unHi);
+ vswap(unLo, hi - m + 1, m);
+
+ n = lo + unLo - ltLo - 1;
+ m = hi - (gtHi - unHi) + 1;
+
+ stack[sp].ll = lo;
+ stack[sp].hh = n;
+ stack[sp].dd = d;
+ sp++;
+
+ stack[sp].ll = n + 1;
+ stack[sp].hh = m - 1;
+ stack[sp].dd = d + 1;
+ sp++;
+
+ stack[sp].ll = m;
+ stack[sp].hh = hi;
+ stack[sp].dd = d;
+ sp++;
+ }
+ }
+
+ private void mainSort() {
+ int i, j, ss, sb;
+ int[] runningOrder = new int[256];
+ int[] copy = new int[256];
+ boolean[] bigDone = new boolean[256];
+ int c1, c2;
+ @SuppressWarnings("unused")
+ int numQSorted;
+
+ /*
+ In the various block-sized structures, live data runs
+ from 0 to last+NUM_OVERSHOOT_BYTES inclusive. First,
+ set up the overshoot area for block.
+ */
+
+ // if (verbosity >= 4) fprintf ( stderr, " sort initialise ...\n" );
+ for (i = 0; i < NUM_OVERSHOOT_BYTES; i++) {
+ block[last + i + 2] = block[(i % (last + 1)) + 1];
+ }
+ for (i = 0; i <= last + NUM_OVERSHOOT_BYTES; i++) {
+ quadrant[i] = 0;
+ }
+
+ block[0] = (char) (block[last + 1]);
+
+ if (last < 4000) {
+ /*
+ Use simpleSort(), since the full sorting mechanism
+ has quite a large constant overhead.
+ */
+ for (i = 0; i <= last; i++) {
+ zptr[i] = i;
+ }
+ firstAttempt = false;
+ workDone = workLimit = 0;
+ simpleSort(0, last, 0);
+ } else {
+ numQSorted = 0;
+ for (i = 0; i <= 255; i++) {
+ bigDone[i] = false;
+ }
+
+ for (i = 0; i <= 65536; i++) {
+ ftab[i] = 0;
+ }
+
+ c1 = block[0];
+ for (i = 0; i <= last; i++) {
+ c2 = block[i + 1];
+ ftab[(c1 << 8) + c2]++;
+ c1 = c2;
+ }
+
+ for (i = 1; i <= 65536; i++) {
+ ftab[i] += ftab[i - 1];
+ }
+
+ c1 = block[1];
+ for (i = 0; i < last; i++) {
+ c2 = block[i + 2];
+ j = (c1 << 8) + c2;
+ c1 = c2;
+ ftab[j]--;
+ zptr[ftab[j]] = i;
+ }
+
+ j = ((block[last + 1]) << 8) + (block[1]);
+ ftab[j]--;
+ zptr[ftab[j]] = last;
+
+ /*
+ Now ftab contains the first loc of every small bucket.
+ Calculate the running order, from smallest to largest
+ big bucket.
+ */
+
+ for (i = 0; i <= 255; i++) {
+ runningOrder[i] = i;
+ }
+
+ {
+ int vv;
+ int h = 1;
+ do {
+ h = 3 * h + 1;
+ }
+ while (h <= 256);
+ do {
+ h = h / 3;
+ for (i = h; i <= 255; i++) {
+ vv = runningOrder[i];
+ j = i;
+ while ((ftab[((runningOrder[j - h]) + 1) << 8]
+ - ftab[(runningOrder[j - h]) << 8]) >
+ (ftab[((vv) + 1) << 8] - ftab[(vv) << 8])) {
+ runningOrder[j] = runningOrder[j - h];
+ j = j - h;
+ if (j <= (h - 1)) {
+ break;
+ }
+ }
+ runningOrder[j] = vv;
+ }
+ } while (h != 1);
+ }
+
+ /*
+ The main sorting loop.
+ */
+ for (i = 0; i <= 255; i++) {
+
+ /*
+ Process big buckets, starting with the least full.
+ */
+ ss = runningOrder[i];
+
+ /*
+ Complete the big bucket [ss] by quicksorting
+ any unsorted small buckets [ss, j]. Hopefully
+ previous pointer-scanning phases have already
+ completed many of the small buckets [ss, j], so
+ we don't have to sort them at all.
+ */
+ for (j = 0; j <= 255; j++) {
+ sb = (ss << 8) + j;
+ if (!((ftab[sb] & SETMASK) == SETMASK)) {
+ int lo = ftab[sb] & CLEARMASK;
+ int hi = (ftab[sb + 1] & CLEARMASK) - 1;
+ if (hi > lo) {
+ qSort3(lo, hi, 2);
+ numQSorted += (hi - lo + 1);
+ if (workDone > workLimit && firstAttempt) {
+ return;
+ }
+ }
+ ftab[sb] |= SETMASK;
+ }
+ }
+
+ /*
+ The ss big bucket is now done. Record this fact,
+ and update the quadrant descriptors. Remember to
+ update quadrants in the overshoot area too, if
+ necessary. The "if (i < 255)" test merely skips
+ this updating for the last bucket processed, since
+ updating for the last bucket is pointless.
+ */
+ bigDone[ss] = true;
+
+ if (i < 255) {
+ int bbStart = ftab[ss << 8] & CLEARMASK;
+ int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart;
+ int shifts = 0;
+
+ while ((bbSize >> shifts) > 65534) {
+ shifts++;
+ }
+
+ for (j = 0; j < bbSize; j++) {
+ int a2update = zptr[bbStart + j];
+ int qVal = (j >> shifts);
+ quadrant[a2update] = qVal;
+ if (a2update < NUM_OVERSHOOT_BYTES) {
+ quadrant[a2update + last + 1] = qVal;
+ }
+ }
+
+ if (!(((bbSize - 1) >> shifts) <= 65535)) {
+ panic();
+ }
+ }
+
+ /*
+ Now scan this big bucket so as to synthesise the
+ sorted order for small buckets [t, ss] for all t != ss.
+ */
+ for (j = 0; j <= 255; j++) {
+ copy[j] = ftab[(j << 8) + ss] & CLEARMASK;
+ }
+
+ for (j = ftab[ss << 8] & CLEARMASK;
+ j < (ftab[(ss + 1) << 8] & CLEARMASK); j++) {
+ c1 = block[zptr[j]];
+ if (!bigDone[c1]) {
+ zptr[copy[c1]] = zptr[j] == 0 ? last : zptr[j] - 1;
+ copy[c1]++;
+ }
+ }
+
+ for (j = 0; j <= 255; j++) {
+ ftab[(j << 8) + ss] |= SETMASK;
+ }
+ }
+ }
+ }
+
+ private void randomiseBlock() {
+ int i;
+ int rNToGo = 0;
+ int rTPos = 0;
+ for (i = 0; i < 256; i++) {
+ inUse[i] = false;
+ }
+
+ for (i = 0; i <= last; i++) {
+ if (rNToGo == 0) {
+ rNToGo = (char) rNums[rTPos];
+ rTPos++;
+ if (rTPos == 512) {
+ rTPos = 0;
+ }
+ }
+ rNToGo--;
+ block[i + 1] ^= ((rNToGo == 1) ? 1 : 0);
+ // handle 16 bit signed numbers
+ block[i + 1] &= 0xFF;
+
+ inUse[block[i + 1]] = true;
+ }
+ }
+
+ private void doReversibleTransformation() {
+ int i;
+
+ workLimit = workFactor * last;
+ workDone = 0;
+ blockRandomised = false;
+ firstAttempt = true;
+
+ mainSort();
+
+ if (workDone > workLimit && firstAttempt) {
+ randomiseBlock();
+ workLimit = workDone = 0;
+ blockRandomised = true;
+ firstAttempt = false;
+ mainSort();
+ }
+
+ origPtr = -1;
+ for (i = 0; i <= last; i++) {
+ if (zptr[i] == 0) {
+ origPtr = i;
+ break;
+ }
+ }
+
+ if (origPtr == -1) {
+ panic();
+ }
+ }
+
+ private boolean fullGtU(int i1, int i2) {
+ int k;
+ char c1, c2;
+ int s1, s2;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ k = last + 1;
+
+ do {
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2) {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2) {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2) {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2) {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2) {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ if (i1 > last) {
+ i1 -= last;
+ i1--;
+ }
+ if (i2 > last) {
+ i2 -= last;
+ i2--;
+ }
+
+ k -= 4;
+ workDone++;
+ } while (k >= 0);
+
+ return false;
+ }
+
+ /*
+ Knuth's increments seem to work better
+ than Incerpi-Sedgewick here. Possibly
+ because the number of elems to sort is
+ usually small, typically <= 20.
+ */
+ private int[] incs = { 1, 4, 13, 40, 121, 364, 1093, 3280,
+ 9841, 29524, 88573, 265720,
+ 797161, 2391484 };
+
+ private void allocateCompressStructures () {
+ int n = baseBlockSize * blockSize100k;
+ block = new char[(n + 1 + NUM_OVERSHOOT_BYTES)];
+ quadrant = new int[(n + NUM_OVERSHOOT_BYTES)];
+ zptr = new int[n];
+ ftab = new int[65537];
+
+ // in Java a failed allocation throws OutOfMemoryError, so the C-style
+ // null checks that the original code carried here are unnecessary
+
+ /*
+ The back end needs a place to store the MTF values
+ whilst it calculates the coding tables. We could
+ put them in the zptr array. However, these values
+ will fit in a short, so we overlay szptr at the
+ start of zptr, in the hope of reducing the number
+ of cache misses induced by the multiple traversals
+ of the MTF values when calculating coding tables.
+ Seems to improve compression speed by about 1%.
+ */
+ // szptr = zptr;
+
+
+ szptr = new short[2 * n];
+ }
+
+ private void generateMTFValues() {
+ char[] yy = new char[256];
+ int i, j;
+ char tmp;
+ char tmp2;
+ int zPend;
+ int wr;
+ int EOB;
+
+ makeMaps();
+ EOB = nInUse + 1;
+
+ for (i = 0; i <= EOB; i++) {
+ mtfFreq[i] = 0;
+ }
+
+ wr = 0;
+ zPend = 0;
+ for (i = 0; i < nInUse; i++) {
+ yy[i] = (char) i;
+ }
+
+
+ for (i = 0; i <= last; i++) {
+ char ll_i;
+
+ ll_i = unseqToSeq[block[zptr[i]]];
+
+ j = 0;
+ tmp = yy[j];
+ while (ll_i != tmp) {
+ j++;
+ tmp2 = tmp;
+ tmp = yy[j];
+ yy[j] = tmp2;
+ }
+ yy[0] = tmp;
+
+ if (j == 0) {
+ zPend++;
+ } else {
+ if (zPend > 0) {
+ zPend--;
+ while (true) {
+ switch (zPend % 2) {
+ case 0:
+ szptr[wr] = (short) RUNA;
+ wr++;
+ mtfFreq[RUNA]++;
+ break;
+ case 1:
+ szptr[wr] = (short) RUNB;
+ wr++;
+ mtfFreq[RUNB]++;
+ break;
+ }
+ if (zPend < 2) {
+ break;
+ }
+ zPend = (zPend - 2) / 2;
+ }
+ zPend = 0;
+ }
+ szptr[wr] = (short) (j + 1);
+ wr++;
+ mtfFreq[j + 1]++;
+ }
+ }
+
+ if (zPend > 0) {
+ zPend--;
+ while (true) {
+ switch (zPend % 2) {
+ case 0:
+ szptr[wr] = (short) RUNA;
+ wr++;
+ mtfFreq[RUNA]++;
+ break;
+ case 1:
+ szptr[wr] = (short) RUNB;
+ wr++;
+ mtfFreq[RUNB]++;
+ break;
+ }
+ if (zPend < 2) {
+ break;
+ }
+ zPend = (zPend - 2) / 2;
+ }
+ }
+
+ szptr[wr] = (short) EOB;
+ wr++;
+ mtfFreq[EOB]++;
+
+ nMTF = wr;
+ }
+}
+
+
diff --git a/Tools/Cache Editor/src/org/apache/tools/bzip2/CRC.java b/Tools/Cache Editor/src/org/apache/tools/bzip2/CRC.java
new file mode 100644
index 000000000..bc8bc644b
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apache/tools/bzip2/CRC.java
@@ -0,0 +1,167 @@
+/*
+ * The Apache Software License, Version 1.1
+ *
+ * Copyright (c) 2001-2002 The Apache Software Foundation. All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. The end-user documentation included with the redistribution, if
+ * any, must include the following acknowlegement:
+ * "This product includes software developed by the
+ * Apache Software Foundation (http://www.apache.org/)."
+ * Alternately, this acknowlegement may appear in the software itself,
+ * if and wherever such third-party acknowlegements normally appear.
+ *
+ * 4. The names "Ant" and "Apache Software
+ * Foundation" must not be used to endorse or promote products derived
+ * from this software without prior written permission. For written
+ * permission, please contact apache@apache.org.
+ *
+ * 5. Products derived from this software may not be called "Apache"
+ * nor may "Apache" appear in their names without prior written
+ * permission of the Apache Group.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation. For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * <keiron@aftexsw.com> to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+package org.apache.tools.bzip2;
+
+/**
+ * A simple class that holds and calculates the CRC for sanity checking
+ * of the data.
+ *
+ * @author Keiron Liddle
+ */
+class CRC {
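+ /*
+ * Lookup table for the MSB-first CRC-32 used by the bzip2 format
+ * (generator polynomial 0x04C11DB7).
+ */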
+ public static int crc32Table[] = {
+ 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
+ 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
+ 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+ 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
+ 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
+ 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+ 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
+ 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
+ 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+ 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
+ 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
+ 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+ 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
+ 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
+ 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+ 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
+ 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
+ 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+ 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
+ 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
+ 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+ 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
+ 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
+ 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+ 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
+ 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
+ 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+ 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
+ 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
+ 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+ 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
+ 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
+ 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+ 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
+ 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
+ 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+ 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
+ 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
+ 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
+ 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
+ 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
+ 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+ 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
+ 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
+ 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+ 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
+ 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
+ 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+ 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
+ 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
+ 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+ 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
+ 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
+ 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
+ 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
+ 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
+ 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+ 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
+ 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
+ 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+ 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
+ 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
+ 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+ 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+ };
+
+ public CRC() {
+ initialiseCRC();
+ }
+
+ void initialiseCRC() {
+ globalCrc = 0xffffffff;
+ }
+
+ int getFinalCRC() {
+ return ~globalCrc;
+ }
+
+ int getGlobalCRC() {
+ return globalCrc;
+ }
+
+ void setGlobalCRC(int newCrc) {
+ globalCrc = newCrc;
+ }
+
+ void updateCRC(int inCh) {
+ int temp = (globalCrc >> 24) ^ inCh;
+ if (temp < 0) {
+ temp = 256 + temp;
+ }
+ globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp];
+ }
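+
+ /*
+ * Usage sketch: checksum a byte[] the way the compressor does.
+ *
+ * CRC crc = new CRC();
+ * for (byte b : data) { // hypothetical data
+ *     crc.updateCRC(b & 0xFF);
+ * }
+ * int sum = crc.getFinalCRC();
+ */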
+
+ int globalCrc;
+}
+
diff --git a/Tools/Cache Editor/src/org/apollo/fs/FileDescriptor.java b/Tools/Cache Editor/src/org/apollo/fs/FileDescriptor.java
new file mode 100644
index 000000000..212703588
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/FileDescriptor.java
@@ -0,0 +1,45 @@
+package org.apollo.fs;
+
+/**
+ * A class which points to a file in the cache.
+ * @author Graham
+ */
+public final class FileDescriptor {
+
+ /**
+ * The file type.
+ */
+ private final int type;
+
+ /**
+ * The file id.
+ */
+ private final int file;
+
+ /**
+ * Creates the file descriptor.
+ * @param type The file type.
+ * @param file The file id.
+ */
+ public FileDescriptor(int type, int file) {
+ this.type = type;
+ this.file = file;
+ }
+
+ /**
+ * Gets the file type.
+ * @return The file type.
+ */
+ public int getType() {
+ return type;
+ }
+
+ /**
+ * Gets the file id.
+ * @return The file id.
+ */
+ public int getFile() {
+ return file;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/FileSystemConstants.java b/Tools/Cache Editor/src/org/apollo/fs/FileSystemConstants.java
new file mode 100644
index 000000000..2c308162b
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/FileSystemConstants.java
@@ -0,0 +1,46 @@
+package org.apollo.fs;
+
+/**
+ * Holds file system related constants.
+ * @author Graham
+ */
+public final class FileSystemConstants {
+
+ /**
+ * The number of caches.
+ */
+ public static final int CACHE_COUNT = 5;
+
+ /**
+ * The number of archives in cache 0.
+ */
+ public static final int ARCHIVE_COUNT = 9;
+
+ /**
+ * The size of an index.
+ */
+ public static final int INDEX_SIZE = 6;
+
+ /**
+ * The size of a header.
+ */
+ public static final int HEADER_SIZE = 8;
+
+ /**
+ * The size of a chunk.
+ */
+ public static final int CHUNK_SIZE = 512;
+
+ /**
+ * The size of a block.
+ */
+ public static final int BLOCK_SIZE = HEADER_SIZE + CHUNK_SIZE;
+
+ /**
+ * Default private constructor to prevent instantiation.
+ */
+ private FileSystemConstants() {
+
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/Index.java b/Tools/Cache Editor/src/org/apollo/fs/Index.java
new file mode 100644
index 000000000..505034f16
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/Index.java
@@ -0,0 +1,62 @@
+package org.apollo.fs;
+
+/**
+ * An {@link Index} points to a file in the {@code main_file_cache.dat} file.
+ * @author Graham
+ */
+public final class Index {
+
+ /**
+ * Decodes a buffer into an index.
+ * @param buffer The buffer.
+ * @return The decoded {@link Index}.
+ * @throws IllegalArgumentException if the buffer length is invalid.
+ */
+ public static Index decode(byte[] buffer) {
+ if (buffer.length != FileSystemConstants.INDEX_SIZE) {
+ throw new IllegalArgumentException("Incorrect buffer length.");
+ }
+
+ int size = ((buffer[0] & 0xFF) << 16) | ((buffer[1] & 0xFF) << 8) | (buffer[2] & 0xFF);
+ int block = ((buffer[3] & 0xFF) << 16) | ((buffer[4] & 0xFF) << 8) | (buffer[5] & 0xFF);
+
+ return new Index(size, block);
+ }
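+
+ /*
+ * Example: the six index bytes 00 00 0A 00 00 02 decode to
+ * size = 0x00000A = 10 bytes starting at block 0x000002 = 2.
+ */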
+
+ /**
+ * The size of the file.
+ */
+ private final int size;
+
+ /**
+ * The first block of the file.
+ */
+ private final int block;
+
+ /**
+ * Creates the index.
+ * @param size The size of the file.
+ * @param block The first block of the file.
+ */
+ public Index(int size, int block) {
+ this.size = size;
+ this.block = block;
+ }
+
+ /**
+ * Gets the size of the file.
+ * @return The size of the file.
+ */
+ public int getSize() {
+ return size;
+ }
+
+ /**
+ * Gets the first block of the file.
+ * @return The first block of the file.
+ */
+ public int getBlock() {
+ return block;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/IndexedFileSystem.java b/Tools/Cache Editor/src/org/apollo/fs/IndexedFileSystem.java
new file mode 100644
index 000000000..c8110e050
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/IndexedFileSystem.java
@@ -0,0 +1,291 @@
+package org.apollo.fs;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.util.zip.CRC32;
+
+/**
+ * A file system based on top of the operating system's file system. It
+ * consists of a data file and index files. Index files point to blocks in the
+ * data file, which contains the actual data.
+ * @author Graham
+ */
+public final class IndexedFileSystem implements Closeable {
+
+ /**
+ * Read only flag.
+ */
+ private final boolean readOnly;
+
+ /**
+ * The index files.
+ */
+ private RandomAccessFile[] indices = new RandomAccessFile[256];
+
+ /**
+ * The data file.
+ */
+ private RandomAccessFile data;
+
+ /**
+ * The cached CRC table.
+ */
+ private ByteBuffer crcTable;
+
+ /**
+ * Creates the file system with the specified base directory.
+ * @param base The base directory.
+ * @param readOnly A flag indicating if the file system will be read only.
+ * @throws Exception if the file system is invalid.
+ */
+ public IndexedFileSystem(File base, boolean readOnly) throws Exception {
+ this.readOnly = readOnly;
+ detectLayout(base);
+ }
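+
+ /*
+ * Usage sketch (hypothetical cache directory): open the cache read-only
+ * and fetch file 2 of type 0.
+ *
+ * IndexedFileSystem fs = new IndexedFileSystem(new File("./data/cache"), true);
+ * ByteBuffer buf = fs.getFile(0, 2);
+ */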
+
+ /**
+ * Checks if this {@link IndexedFileSystem} is read only.
+ * @return {@code true} if so, {@code false} if not.
+ */
+ public boolean isReadOnly() {
+ return readOnly;
+ }
+
+ /**
+ * Automatically detect the layout of the specified directory.
+ * @param base The base directory.
+ * @throws Exception if the file system is invalid.
+ */
+ private void detectLayout(File base) throws Exception {
+ int indexCount = 0;
+ for (int index = 0; index < indices.length; index++) {
+ File f = new File(base.getAbsolutePath() + "/main_file_cache.idx" + index);
+ if (f.exists() && !f.isDirectory()) {
+ indexCount++;
+ indices[index] = new RandomAccessFile(f, readOnly ? "r" : "rw");
+ }
+ }
+ if (indexCount <= 0) {
+ throw new Exception("No index file(s) present");
+ }
+
+ File oldEngineData = new File(base.getAbsolutePath() + "/main_file_cache.dat");
+ File newEngineData = new File(base.getAbsolutePath() + "/main_file_cache.dat2");
+ if (oldEngineData.exists() && !oldEngineData.isDirectory()) {
+ data = new RandomAccessFile(oldEngineData, readOnly ? "r" : "rw");
+ } else if (newEngineData.exists() && !newEngineData.isDirectory()) {
+ data = new RandomAccessFile(newEngineData, readOnly ? "r" : "rw");
+ } else {
+ throw new Exception("No data file present");
+ }
+ }
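+
+ // For reference (derived from the checks above, not part of the original
+ // patch), a valid base directory therefore looks something like:
+ //
+ // base/main_file_cache.dat (or main_file_cache.dat2)
+ // base/main_file_cache.idx0
+ // base/main_file_cache.idx1
+ // ...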
+
+ /**
+ * Gets the index of a file.
+ * @param fd The {@link FileDescriptor} which points to the file.
+ * @return The {@link Index}.
+ * @throws IOException if an I/O error occurs.
+ */
+ private Index getIndex(FileDescriptor fd) throws IOException {
+ int index = fd.getType();
+ if (index < 0 || index >= indices.length) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ byte[] buffer = new byte[FileSystemConstants.INDEX_SIZE];
+ RandomAccessFile indexFile = indices[index];
+ synchronized (indexFile) {
+ long ptr = (long) fd.getFile() * (long) FileSystemConstants.INDEX_SIZE;
+ if (ptr >= 0 && indexFile.length() >= (ptr + FileSystemConstants.INDEX_SIZE)) {
+ indexFile.seek(ptr);
+ indexFile.readFully(buffer);
+ } else {
+ throw new FileNotFoundException();
+ }
+ }
+
+ return Index.decode(buffer);
+ }
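+
+ // Illustrative arithmetic (not part of the original patch): entries are
+ // fixed-width, so the entry for file n lives at byte offset n * INDEX_SIZE
+ // of main_file_cache.idx<type>; with 6-byte entries, file 3 starts at
+ // offset 18.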
+
+ /**
+ * Gets the number of files with the specified type.
+ * @param type The type.
+ * @return The number of files.
+ * @throws IOException if an I/O error occurs.
+ */
+ private int getFileCount(int type) throws IOException {
+ if (type < 0 || type >= indices.length) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ RandomAccessFile indexFile = indices[type];
+ synchronized (indexFile) {
+ return (int) (indexFile.length() / FileSystemConstants.INDEX_SIZE);
+ }
+ }
+
+ /**
+ * Gets the CRC table.
+ * @return The CRC table.
+ * @throws IOException if an I/O error occurs.
+ */
+ public ByteBuffer getCrcTable() throws IOException {
+ if (readOnly) {
+ synchronized (this) {
+ if (crcTable != null) {
+ return crcTable.duplicate();
+ }
+ }
+
+ // the number of archives
+ int archives = getFileCount(0);
+
+ // the hash
+ int hash = 1234;
+
+ // the CRCs
+ int[] crcs = new int[archives];
+
+ // calculate the CRCs
+ CRC32 crc32 = new CRC32();
+ for (int i = 1; i < crcs.length; i++) {
+ crc32.reset();
+
+ ByteBuffer bb = getFile(0, i);
+ byte[] bytes = new byte[bb.remaining()];
+ bb.get(bytes, 0, bytes.length);
+ crc32.update(bytes, 0, bytes.length);
+
+ crcs[i] = (int) crc32.getValue();
+ }
+
+ // hash the CRCs and place them in the buffer
+ ByteBuffer buf = ByteBuffer.allocate(crcs.length * 4 + 4);
+ for (int i = 0; i < crcs.length; i++) {
+ hash = (hash << 1) + crcs[i];
+ buf.putInt(crcs[i]);
+ }
+
+ // place the hash into the buffer
+ buf.putInt(hash);
+ buf.flip();
+
+ synchronized (this) {
+ crcTable = buf.asReadOnlyBuffer();
+ return crcTable.duplicate();
+ }
+ } else {
+ throw new IOException("cannot get CRC table from a writable file system");
+ }
+ }
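+
+ // A sketch of how a consumer could verify this table (hypothetical snippet,
+ // not part of the original patch): recompute the rolling hash over the CRC
+ // ints and compare it against the trailing int.
+ //
+ // ByteBuffer table = fs.getCrcTable();
+ // int hash = 1234;
+ // while (table.remaining() > 4) {
+ // hash = (hash << 1) + table.getInt();
+ // }
+ // boolean valid = hash == table.getInt();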
+
+ /**
+ * Gets a file.
+ * @param type The file type.
+ * @param file The file id.
+ * @return A {@link ByteBuffer} which contains the contents of the file.
+ * @throws IOException if an I/O error occurs.
+ */
+ public ByteBuffer getFile(int type, int file) throws IOException {
+ return getFile(new FileDescriptor(type, file));
+ }
+
+ /**
+ * Gets a file.
+ * @param fd The {@link FileDescriptor} which points to the file.
+ * @return A {@link ByteBuffer} which contains the contents of the file.
+ * @throws IOException if an I/O error occurs.
+ */
+ public ByteBuffer getFile(FileDescriptor fd) throws IOException {
+ Index index = getIndex(fd);
+ ByteBuffer buffer = ByteBuffer.allocate(index.getSize());
+
+ // calculate some initial values
+ long ptr = (long) index.getBlock() * (long) FileSystemConstants.BLOCK_SIZE;
+ int read = 0;
+ int size = index.getSize();
+ int blocks = size / FileSystemConstants.CHUNK_SIZE;
+ if (size % FileSystemConstants.CHUNK_SIZE != 0) {
+ blocks++;
+ }
+
+ for (int i = 0; i < blocks; i++) {
+
+ // read header
+ byte[] header = new byte[FileSystemConstants.HEADER_SIZE];
+ synchronized (data) {
+ data.seek(ptr);
+ data.readFully(header);
+ }
+
+ // increment pointers
+ ptr += FileSystemConstants.HEADER_SIZE;
+
+ // parse header
+ int nextFile = ((header[0] & 0xFF) << 8) | (header[1] & 0xFF);
+ int curChunk = ((header[2] & 0xFF) << 8) | (header[3] & 0xFF);
+ int nextBlock = ((header[4] & 0xFF) << 16) | ((header[5] & 0xFF) << 8) | (header[6] & 0xFF);
+ int nextType = header[7] & 0xFF;
+
+ // check expected chunk id is correct
+ if (i != curChunk) {
+ throw new IOException("Chunk id mismatch.");
+ }
+
+ // calculate how much we can read
+ int chunkSize = size - read;
+ if (chunkSize > FileSystemConstants.CHUNK_SIZE) {
+ chunkSize = FileSystemConstants.CHUNK_SIZE;
+ }
+
+ // read the next chunk and put it in the buffer
+ byte[] chunk = new byte[chunkSize];
+ synchronized (data) {
+ data.seek(ptr);
+ data.readFully(chunk);
+ }
+ buffer.put(chunk);
+
+ // increment pointers
+ read += chunkSize;
+ ptr = (long) nextBlock * (long) FileSystemConstants.BLOCK_SIZE;
+
+ // if we still have more data to read, check the validity of the
+ // header
+ if (size > read) {
+ if (nextType != (fd.getType() + 1)) {
+ throw new IOException("File type mismatch.");
+ }
+
+ if (nextFile != fd.getFile()) {
+ throw new IOException("File id mismatch.");
+ }
+ }
+ }
+
+ buffer.flip();
+ return buffer;
+ }
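+
+ // For reference (derived from the parsing above, not part of the original
+ // patch), each block in the data file is laid out as:
+ //
+ // bytes 0-1 next file id
+ // bytes 2-3 chunk id within the file (0, 1, 2, ...)
+ // bytes 4-6 next block number
+ // byte 7 next file type (stored as type + 1)
+ // bytes 8+ up to CHUNK_SIZE bytes of file data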
+
+ @Override
+ public void close() throws IOException {
+ if (data != null) {
+ synchronized (data) {
+ data.close();
+ }
+ }
+
+ for (RandomAccessFile index : indices) {
+ if (index != null) {
+ synchronized (index) {
+ index.close();
+ }
+ }
+ }
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/archive/Archive.java b/Tools/Cache Editor/src/org/apollo/fs/archive/Archive.java
new file mode 100644
index 000000000..080a51bb3
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/archive/Archive.java
@@ -0,0 +1,97 @@
+package org.apollo.fs.archive;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apollo.fs.util.ByteBufferUtil;
+import org.apollo.fs.util.CompressionUtil;
+
+/**
+ * Represents an archive.
+ * @author Graham
+ */
+public final class Archive {
+
+ /**
+ * Decodes the archive in the specified buffer.
+ * @param buffer The buffer.
+ * @return The archive.
+ * @throws IOException if an I/O error occurs.
+ */
+ public static Archive decode(ByteBuffer buffer) throws IOException {
+ int extractedSize = ByteBufferUtil.readUnsignedTriByte(buffer);
+ int size = ByteBufferUtil.readUnsignedTriByte(buffer);
+ boolean extracted = false;
+
+ if (size != extractedSize) {
+ byte[] compressed = new byte[size];
+ byte[] uncompressed = new byte[extractedSize];
+ buffer.get(compressed);
+ CompressionUtil.unbzip2(compressed, uncompressed);
+ buffer = ByteBuffer.wrap(uncompressed);
+ extracted = true;
+ }
+
+ int entries = buffer.getShort() & 0xFFFF;
+ int[] identifiers = new int[entries];
+ int[] extractedSizes = new int[entries];
+ int[] sizes = new int[entries];
+
+ for (int i = 0; i < entries; i++) {
+ identifiers[i] = buffer.getInt();
+ extractedSizes[i] = ByteBufferUtil.readUnsignedTriByte(buffer);
+ sizes[i] = ByteBufferUtil.readUnsignedTriByte(buffer);
+ }
+
+ ArchiveEntry[] entry = new ArchiveEntry[entries];
+
+ for (int i = 0; i < entries; i++) {
+ ByteBuffer entryBuffer = ByteBuffer.allocate(extractedSizes[i]);
+ if (!extracted) {
+ byte[] compressed = new byte[sizes[i]];
+ byte[] uncompressed = new byte[extractedSizes[i]];
+ buffer.get(compressed);
+ CompressionUtil.unbzip2(compressed, uncompressed);
+ entryBuffer = ByteBuffer.wrap(uncompressed);
+ }
+ entry[i] = new ArchiveEntry(identifiers[i], entryBuffer);
+ }
+
+ return new Archive(entry);
+ }
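+
+ // A summary of the container decoded above (not part of the original
+ // patch): two tri-bytes give the extracted and stored sizes; if they
+ // differ, the whole archive body is bzip2-compressed, otherwise each entry
+ // is compressed individually. The entry table holds, per entry, an int name
+ // hash and two tri-bytes (extracted size, stored size), followed by the
+ // entry data.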
+
+ /**
+ * The entries in this archive.
+ */
+ private final ArchiveEntry[] entries;
+
+ /**
+ * Creates a new archive.
+ * @param entries The entries in this archive.
+ */
+ public Archive(ArchiveEntry[] entries) {
+ this.entries = entries;
+ }
+
+ /**
+ * Gets an entry by its name.
+ * @param name The name.
+ * @return The entry.
+ * @throws FileNotFoundException if the file could not be found.
+ */
+ public ArchiveEntry getEntry(String name) throws FileNotFoundException {
+ int hash = 0;
+ name = name.toUpperCase();
+ for (int i = 0; i < name.length(); i++) {
+ hash = (hash * 61 + name.charAt(i)) - 32;
+ }
+ for (ArchiveEntry entry : entries) {
+ if (entry.getIdentifier() == hash) {
+ return entry;
+ }
+ }
+ throw new FileNotFoundException();
+ }
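+
+ // The hash compared above can be precomputed for a known name; a minimal
+ // sketch (hypothetical helper, not part of the original patch):
+ //
+ // static int hash(String name) {
+ // int hash = 0;
+ // for (char c : name.toUpperCase().toCharArray()) {
+ // hash = hash * 61 + c - 32;
+ // }
+ // return hash;
+ // }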
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/archive/ArchiveEntry.java b/Tools/Cache Editor/src/org/apollo/fs/archive/ArchiveEntry.java
new file mode 100644
index 000000000..1688483cd
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/archive/ArchiveEntry.java
@@ -0,0 +1,47 @@
+package org.apollo.fs.archive;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Represents a single entry in an {@link Archive}.
+ * @author Graham
+ */
+public final class ArchiveEntry {
+
+ /**
+ * The identifier of this entry.
+ */
+ private final int identifier;
+
+ /**
+ * The buffer of this entry.
+ */
+ private final ByteBuffer buffer;
+
+ /**
+ * Creates a new archive entry.
+ * @param identifier The identifier.
+ * @param buffer The buffer.
+ */
+ public ArchiveEntry(int identifier, ByteBuffer buffer) {
+ this.identifier = identifier;
+ this.buffer = buffer.asReadOnlyBuffer();
+ }
+
+ /**
+ * Gets the identifier of this entry.
+ * @return The identifier of this entry.
+ */
+ public int getIdentifier() {
+ return identifier;
+ }
+
+ /**
+ * Gets the buffer of this entry.
+ * @return The buffer of this entry.
+ */
+ public ByteBuffer getBuffer() {
+ return buffer.duplicate();
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/archive/package-info.java b/Tools/Cache Editor/src/org/apollo/fs/archive/package-info.java
new file mode 100644
index 000000000..0d21259ce
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/archive/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Contains classes which deal with archives.
+ */
+package org.apollo.fs.archive;
diff --git a/Tools/Cache Editor/src/org/apollo/fs/package-info.java b/Tools/Cache Editor/src/org/apollo/fs/package-info.java
new file mode 100644
index 000000000..14c2a3bd8
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/package-info.java
@@ -0,0 +1,5 @@
+/**
+ * Contains classes which deal with the file system that the client uses to
+ * store game data files.
+ */
+package org.apollo.fs;
diff --git a/Tools/Cache Editor/src/org/apollo/fs/parser/package-info.java b/Tools/Cache Editor/src/org/apollo/fs/parser/package-info.java
new file mode 100644
index 000000000..c08852a93
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/parser/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Contains classes which parse files within the game's cache.
+ */
+package org.apollo.fs.parser;
diff --git a/Tools/Cache Editor/src/org/apollo/fs/util/ByteBufferUtil.java b/Tools/Cache Editor/src/org/apollo/fs/util/ByteBufferUtil.java
new file mode 100644
index 000000000..68f7febad
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/util/ByteBufferUtil.java
@@ -0,0 +1,41 @@
+package org.apollo.fs.util;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class which contains {@link ByteBuffer}-related methods.
+ * @author Graham
+ */
+public final class ByteBufferUtil {
+
+ /**
+ * Reads an unsigned tri byte from the specified buffer.
+ * @param buffer The buffer.
+ * @return The tri byte.
+ */
+ public static int readUnsignedTriByte(ByteBuffer buffer) {
+ return ((buffer.get() & 0xFF) << 16) | ((buffer.get() & 0xFF) << 8) | (buffer.get() & 0xFF);
+ }
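+
+ // Illustrative example (not part of the original patch): the bytes
+ // {0x01, 0x02, 0x03} decode to 0x010203, i.e. 66051.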
+
+ /**
+ * Reads a string from the specified buffer.
+ * @param buffer The buffer.
+ * @return The string.
+ */
+ public static String readString(ByteBuffer buffer) {
+ StringBuilder bldr = new StringBuilder();
+ char c;
+ while ((c = (char) buffer.get()) != 10) {
+ bldr.append(c);
+ }
+ return bldr.toString();
+ }
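+
+ // Note (not part of the original patch): strings in this format are
+ // terminated by byte 10 ('\n'), so the bytes {'h', 'i', 10} read back as
+ // "hi".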
+
+ /**
+ * Default private constructor to prevent instantiation.
+ */
+ private ByteBufferUtil() {
+
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/util/CompressionUtil.java b/Tools/Cache Editor/src/org/apollo/fs/util/CompressionUtil.java
new file mode 100644
index 000000000..173ae2c3b
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/util/CompressionUtil.java
@@ -0,0 +1,103 @@
+package org.apollo.fs.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.util.zip.DeflaterOutputStream;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
+
+/**
+ * A utility class for performing compression/uncompression.
+ * @author Graham
+ */
+public final class CompressionUtil {
+
+ /**
+ * Ungzips the compressed array and places the results into the uncompressed array.
+ * @param compressed The compressed array.
+ * @param uncompressed The uncompressed array.
+ * @throws IOException if an I/O error occurs.
+ */
+ public static void ungzip(byte[] compressed, byte[] uncompressed) throws IOException {
+ DataInputStream is = new DataInputStream(new GZIPInputStream(new ByteArrayInputStream(compressed)));
+ try {
+ is.readFully(uncompressed);
+ } finally {
+ is.close();
+ }
+ }
+
+ /**
+ * Unbzip2s the compressed array and places the result into the uncompressed array.
+ * @param compressed The compressed array.
+ * @param uncompressed The uncompressed array.
+ * @throws IOException if an I/O error occurs.
+ */
+ public static void unbzip2(byte[] compressed, byte[] uncompressed) throws IOException {
+ byte[] newCompressed = new byte[compressed.length + 4];
+ newCompressed[0] = 'B';
+ newCompressed[1] = 'Z';
+ newCompressed[2] = 'h';
+ newCompressed[3] = '1';
+ System.arraycopy(compressed, 0, newCompressed, 4, compressed.length);
+
+ DataInputStream is = new DataInputStream(new BZip2CompressorInputStream(new ByteArrayInputStream(newCompressed)));
+ try {
+ is.readFully(uncompressed);
+ } finally {
+ is.close();
+ }
+ }
+
+ /**
+ * Gzips the specified array.
+ * @param bytes The uncompressed array.
+ * @return The compressed array.
+ * @throws IOException if an I/O error occurs.
+ */
+ public static byte[] gzip(byte[] bytes) throws IOException {
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ DeflaterOutputStream os = new GZIPOutputStream(bout);
+ try {
+ os.write(bytes);
+ os.finish();
+ return bout.toByteArray();
+ } finally {
+ os.close();
+ }
+ }
+
+ /**
+ * Bzip2s the specified array.
+ * @param bytes The uncompressed array.
+ * @return The compressed array.
+ * @throws IOException if an I/O error occurs.
+ */
+ public static byte[] bzip2(byte[] bytes) throws IOException {
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ BZip2CompressorOutputStream os = new BZip2CompressorOutputStream(bout, 1);
+ try {
+ os.write(bytes);
+ os.finish();
+ byte[] compressed = bout.toByteArray();
+ byte[] newCompressed = new byte[compressed.length - 4];
+ System.arraycopy(compressed, 4, newCompressed, 0, newCompressed.length);
+ return newCompressed;
+ } finally {
+ os.close();
+ }
+ }
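+
+ // A minimal round-trip sketch (not part of the original patch): the cache
+ // stores bzip2 data without its four-byte "BZh1" header, which bzip2()
+ // strips and unbzip2() re-adds, so the two methods are inverses:
+ //
+ // byte[] original = "hello".getBytes();
+ // byte[] packed = CompressionUtil.bzip2(original);
+ // byte[] unpacked = new byte[original.length];
+ // CompressionUtil.unbzip2(packed, unpacked); // unpacked equals original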
+
+ /**
+ * Default private constructor to prevent instantiation.
+ */
+ private CompressionUtil() {
+
+ }
+
+}
diff --git a/Tools/Cache Editor/src/org/apollo/fs/util/ZipUtils.java b/Tools/Cache Editor/src/org/apollo/fs/util/ZipUtils.java
new file mode 100644
index 000000000..3009ba2c3
--- /dev/null
+++ b/Tools/Cache Editor/src/org/apollo/fs/util/ZipUtils.java
@@ -0,0 +1,50 @@
+package org.apollo.fs.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.zip.GZIPInputStream;
+
+public class ZipUtils {
+
+ /**
+ * Un-gzips the contents of a cache file.
+ *
+ * @param buffer
+ * The buffer containing the gzipped cache file.
+ * @return The unzipped byte buffer.
+ * @throws IOException
+ * if an I/O error occurs.
+ */
+ public static ByteBuffer unzip(ByteBuffer buffer) throws IOException {
+ byte[] data = new byte[buffer.remaining()];
+ buffer.get(data);
+ InputStream is = new GZIPInputStream(new ByteArrayInputStream(data));
+ byte[] out;
+ try {
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ try {
+ byte[] buf = new byte[1024];
+ int read;
+ while ((read = is.read(buf, 0, buf.length)) != -1) {
+ os.write(buf, 0, read);
+ }
+ } finally {
+ os.close();
+ }
+ out = os.toByteArray();
+ } finally {
+ is.close();
+ }
+ ByteBuffer newBuf = ByteBuffer.allocate(out.length);
+ newBuf.put(out);
+ newBuf.flip();
+ return newBuf;
+ }
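+
+ // Design note (not part of the original patch): unlike
+ // CompressionUtil.ungzip, which needs the extracted size up front, this
+ // helper streams the gzip data in 1 KB chunks, so it also works when the
+ // extracted size is unknown.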
+}
+
diff --git a/Tools/Cache Editor/src/valkyrion/CachePacker.java b/Tools/Cache Editor/src/valkyrion/CachePacker.java
new file mode 100644
index 000000000..568b9f7fd
--- /dev/null
+++ b/Tools/Cache Editor/src/valkyrion/CachePacker.java
@@ -0,0 +1,132 @@
+package valkyrion;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import javax.swing.JOptionPane;
+
+import com.alex.store.Store;
+import com.alex.utils.Constants;
+import com.alex.utils.Utils;
+
+public class CachePacker {
+ FileInputStream f2;
+
+ public static void replaceMidi(String cacheDir, int archiveId, int fileId, String convertedFileDir) throws IOException {
+ Store cache = new Store(cacheDir);
+ if (cache.getIndexes()[6].putFile(archiveId, fileId, getBytesFromFile(new File(convertedFileDir)))) {
+ JOptionPane.showMessageDialog(null, "MIDI packed successfully, if your client crashes on startup, use another midi and the backuped cache and try again.");
+ } else {
+ JOptionPane.showMessageDialog(null, "MIDI did not successfully pack!");
+ }
+ }
+ public static void main(String...args) throws Throwable {
+ String dir = "C:/Users/v4rg/Downloads/rs music/";
+ Store store = new Store("./498/");
+ Store from = new Store("./666/");
+ if (true) {
+ int index = 14;
+ System.out.println("To amount=" + store.getIndexes()[index].getValidArchivesCount());
+ System.out.println("From amount=" + from.getIndexes()[index].getValidArchivesCount());
+ int count = 0;
+ int fail = 0;
+ for (int archive = 0; archive < from.getIndexes()[index].getValidArchivesCount(); archive++) {
+ if (from.getIndexes()[index].archiveExists(archive)) {
+ byte[] data = from.getIndexes()[index].getFile(archive, 0);
+ if (data == null || data.length < 1) {
+ fail++;
+ // System.out.println("Invalid archive " + archive);
+ continue;
+ }
+ store.getIndexes()[index].putFile(archive, 0, data);
+ if (store.getIndexes()[index].getFile(archive, 0) != null) {
+ if (count++ % 100 == 0)
+ System.out.println("Packed music " + archive);
+ } else {
+ System.out.println("Failed to pack music " + archive);
+ fail++;
+ }
+ // continue;
+ } else {
+ fail++;
+ }
+ }
+ System.out.println("Packed " + count + "/" + (count + fail) + " music (" + 666 + ")!");
+ return;
+ }
+ // for (File f : new File(dir + "out/").listFiles()) {
+ // int index = Integer.parseInt(f.getName().replace(".mid", ""));
+ // boolean b = store.getIndexes()[6].putFile(index, 0, Constants.GZIP_COMPRESSION, getBytesFromFile(f), null, true, false, -1, -1);
+ // System.out.println(b ? "Successfully packed music " + index + "!" : "Failed to pack music " + index + "!");
+ // }
+ // store.getIndexes()[6].resetCachedFiles();
+ // BufferedWriter musicList = new BufferedWriter(new FileWriter("./music-list.txt"));
+ // new File(dir + "out/").mkdir();
+ // int index = 1;
+ // for (File f : new File(dir + "rs music/").listFiles()) {
+ // if (!f.getName().startsWith("runescape")) {
+ // continue;
+ // }
+ // System.out.println(f.getName());
+ // try {
+ // convertMidi(dir + "rs music/" + f.getName(), dir + "out/" + index + ".mid");
+ // musicList.append((index++) + ": " + f.getName().replace(".mid", ""));
+ // musicList.newLine();
+ // } catch (Throwable t) {
+ // t.printStackTrace();
+ // musicList.append((index++) + ": " + f.getName().replace(".mid", "") + " //FAILED!");
+ // musicList.newLine();
+ // }
+ // }
+ // musicList.flush();
+ // musicList.close();
+ }
+
+ public static void convertMidi(String input, String output) throws Exception {
+ MusicEncoder.convertMidi(input, output);
+ }
+
+ public static void addMusicFile(String cacheDir, String convertedFileDir, String musicName) throws IOException {
+ Store cache = new Store(cacheDir);
+ cache.getIndexes()[6].putFile(803, 0, Constants.GZIP_COMPRESSION, getBytesFromFile(new File(convertedFileDir)), null, true, false, Utils.getNameHash(musicName), -1);
+ }
+
+ public static byte[] getBytesFromFile(File file) throws IOException {
+ InputStream is = new FileInputStream(file);
+
+ // Get the size of the file
+ long length = file.length();
+
+ // An array cannot be created with a long length, so before casting to
+ // an int, ensure the file is not larger than Integer.MAX_VALUE.
+ if (length > Integer.MAX_VALUE) {
+ is.close();
+ throw new IOException("File is too large: " + file.getName());
+ }
+
+ // Create the byte array to hold the data
+ byte[] bytes = new byte[(int)length];
+
+ // Read in the bytes
+ int offset = 0;
+ int numRead = 0;
+ while (offset < bytes.length
+ && (numRead=is.read(bytes, offset, bytes.length-offset)) >= 0) {
+ offset += numRead;
+ }
+
+ // Ensure all the bytes have been read in
+ if (offset < bytes.length) {
+ is.close();
+ throw new IOException("Could not completely read file "+file.getName());
+ }
+
+ // Close the input stream and return bytes
+ is.close();
+ return bytes;
+ }
+
+}
diff --git a/Tools/Cache Editor/src/valkyrion/MusicEncoder.java b/Tools/Cache Editor/src/valkyrion/MusicEncoder.java
new file mode 100644
index 000000000..881e4a35e
--- /dev/null
+++ b/Tools/Cache Editor/src/valkyrion/MusicEncoder.java
@@ -0,0 +1,487 @@
+package valkyrion;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+import javax.sound.midi.MetaMessage;
+import javax.sound.midi.MidiEvent;
+import javax.sound.midi.MidiMessage;
+import javax.sound.midi.MidiSystem;
+import javax.sound.midi.Sequence;
+import javax.sound.midi.ShortMessage;
+import javax.sound.midi.Track;
+import javax.swing.JOptionPane;
+
+/**
+ * Converts a MIDI file to the RuneScape format.
+ *
+ * NOTE: Jagex does not use the default soundbank; they have multiple
+ * soundbanks and their own instruments, located in idx15, that use sound
+ * effects (idx4/14) as their notes. For this reason some MIDI files may
+ * sound different, although most of the first soundbank matches the
+ * default soundbank instruments.
+ *
+ * @author Vincent
+ *
+ */
+public class MusicEncoder {
+
+ public static final int NOTE_OFF = 0x80;
+ public static final int NOTE_ON = 0x90;
+ public static final int KEY_AFTER_TOUCH = 0xA0;
+ public static final int CONTROL_CHANGE = 0xB0;
+ public static final int PROGRAM_CHANGE = 0xC0;
+ public static final int CHANNEL_AFTER_TOUCH = 0xD0;
+ public static final int PITCH_WHEEL_CHANGE = 0xE0;
+
+ public static final int END_OF_TRACK = 0x2F;
+ public static final int SET_TEMPO = 0x51;
+
+ public static void convertMidi(String input, String output) throws Exception {
+ Sequence sequence = MidiSystem.getSequence(new File(input));
+ DataOutputStream dos = new DataOutputStream(new FileOutputStream(output));
+
+ // This could be done with far fewer loops by writing into multiple buffers instead.
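+ // The layout written below (derived from the passes that follow): first one
+ // opcode byte per event, then one variable-length delta-time per event,
+ // then one column per event attribute (controller numbers, velocities,
+ // pitch-bend halves, ...), and finally a footer with the track count and
+ // resolution. Grouping similar bytes into columns compresses much better.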
+
+ //write event opcodes with channel
+ for (Track track : sequence.getTracks()) {
+ int prevChannel = 0;
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ int ch = (sm.getChannel() ^ prevChannel) << 4;
+ switch(sm.getCommand()) {
+ case NOTE_OFF:
+ dos.write(1 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ case NOTE_ON:
+ dos.write(0 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ case KEY_AFTER_TOUCH:
+ dos.write(5 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ case CONTROL_CHANGE:
+ dos.write(2 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ case PROGRAM_CHANGE:
+ dos.write(6 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ case CHANNEL_AFTER_TOUCH:
+ dos.write(4 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ case PITCH_WHEEL_CHANGE:
+ dos.write(3 | ch);
+ prevChannel = sm.getChannel();
+ break;
+ }
+ } else if(message instanceof MetaMessage) {
+ MetaMessage mm = (MetaMessage) message;
+ switch(mm.getType()) {
+ case END_OF_TRACK:
+ dos.write(7);
+ break;
+ case SET_TEMPO:
+ dos.write(23);
+ break;
+ default:
+ //OTHER META EVENTS ARE IGNORED
+ break;
+ }
+ } else {
+ //SYSEX MESSAGES ARE IGNORED
+ }
+ }
+
+ }
+
+ //write event timestamp for used opcodes
+ for (Track track : sequence.getTracks()) {
+ int lastTick = 0;
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ switch(sm.getCommand()) {
+ case NOTE_OFF:
+ case NOTE_ON:
+ case KEY_AFTER_TOUCH:
+ case CONTROL_CHANGE:
+ case PROGRAM_CHANGE:
+ case CHANNEL_AFTER_TOUCH:
+ case PITCH_WHEEL_CHANGE:
+ putVariableInt(dos, (int)event.getTick() - lastTick);
+ lastTick = (int) event.getTick();
+ break;
+ }
+ } else if(message instanceof MetaMessage) {
+ MetaMessage mm = (MetaMessage) message;
+ switch(mm.getType()) {
+ case END_OF_TRACK:
+ case SET_TEMPO:
+ putVariableInt(dos, (int)event.getTick() - lastTick);
+ lastTick = (int) event.getTick();
+ break;
+ }
+ }
+ }
+ }
+
+ // Jagex encodes each value as an offset from the previous one, because the delta is usually 0 and compresses better.
+ int lastController = 0;
+ int lastNote = 0;
+ int lastNoteOnVelocity = 0;
+ int lastNoteOffVelocity = 0;
+ int lastWheelChangeT = 0;
+ int lastWheelChangeB = 0;
+ int lastChannelAfterTouch = 0;
+ int lastKeyAfterTouchVelocity = 0;
+
+ //write controller number changes
+ int[] lastControllerValue = new int[128];
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE) {
+ dos.write(sm.getData1() - lastController);
+ lastController = sm.getData1();
+ }
+ }
+ }
+ }
+
+ //controller 64 65 120 121 123 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && (sm.getData1() == 64 || sm.getData1() == 65 || sm.getData1() == 120 || sm.getData1() == 121 || sm.getData1() == 123)) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //key after touch velocity changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == KEY_AFTER_TOUCH) {
+ dos.write(sm.getData2() - lastKeyAfterTouchVelocity);
+ lastKeyAfterTouchVelocity = sm.getData2();
+ }
+ }
+ }
+ }
+ //channel after touch channel changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CHANNEL_AFTER_TOUCH) {
+ dos.write(sm.getData1() - lastChannelAfterTouch);
+ lastChannelAfterTouch = sm.getData1();
+ }
+ }
+ }
+ }
+ //pitch bend top values
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == PITCH_WHEEL_CHANGE) {
+ dos.write(sm.getData2() - lastWheelChangeT);
+ lastWheelChangeT = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 1 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 1) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 7 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 7) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 10 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 10) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //note changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == NOTE_OFF || sm.getCommand() == NOTE_ON || sm.getCommand() == KEY_AFTER_TOUCH) {
+ dos.write(sm.getData1() - lastNote);
+ lastNote = sm.getData1();
+ }
+ }
+ }
+ }
+ //note on velocity changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == NOTE_ON) {
+ dos.write(sm.getData2() - lastNoteOnVelocity);
+ lastNoteOnVelocity = sm.getData2();
+ }
+ }
+ }
+ }
+ // All unlisted controller changes (controllers are probably grouped like this because it compresses even better).
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && !(sm.getData1() == 64 || sm.getData1() == 65 || sm.getData1() == 120 || sm.getData1() == 121 || sm.getData1() == 123 || sm.getData1() == 0 || sm.getData1() == 32 || sm.getData1() == 1 || sm.getData1() == 33 || sm.getData1() == 7 || sm.getData1() == 39 || sm.getData1() == 10 || sm.getData1() == 42 || sm.getData1() == 99 || sm.getData1() == 98 || sm.getData1() == 101 || sm.getData1() == 100)) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //note off velocity changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == NOTE_OFF) {
+ dos.write(sm.getData2() - lastNoteOffVelocity);
+ lastNoteOffVelocity = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 33 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 33) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 39 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 39) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 42 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 42) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 0, 32 and program changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && (sm.getData1() == 0 || sm.getData1() == 32)) {
+ JOptionPane.showMessageDialog(null, "WARNING SONG USES SOUND BANKS BYTE: "+sm.getData1()+" VALUE: "+sm.getData2()+" ");
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ } else if(sm.getCommand() == PROGRAM_CHANGE) {
+ dos.write(sm.getData1());
+ }
+ }
+ }
+ }
+ //pitch bend bottom changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == PITCH_WHEEL_CHANGE) {
+ dos.write(sm.getData1() - lastWheelChangeB);
+ lastWheelChangeB = sm.getData1();
+ }
+ }
+ }
+ }
+ //controller 99 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 99) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 98 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 98) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 101 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 101) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //controller 100 changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof ShortMessage) {
+ ShortMessage sm = (ShortMessage) message;
+ if(sm.getCommand() == CONTROL_CHANGE && sm.getData1() == 100) {
+ dos.write(sm.getData2() - lastControllerValue[sm.getData1()]);
+ lastControllerValue[sm.getData1()] = sm.getData2();
+ }
+ }
+ }
+ }
+ //tempo changes
+ for (Track track : sequence.getTracks()) {
+ for (int i=0; i < track.size(); i++) {
+ MidiEvent event = track.get(i);
+ MidiMessage message = event.getMessage();
+ if (message instanceof MetaMessage) {
+ MetaMessage mm = (MetaMessage) message;
+ if(mm.getType() == SET_TEMPO) {
+ dos.write(mm.getData());
+ }
+ }
+ }
+ }
+ //write footer
+ dos.write(sequence.getTracks().length);
+ dos.writeShort(sequence.getResolution());
+
+ dos.flush();
+ dos.close();
+
+ }
+
+ static final void putVariableInt(DataOutputStream dos, int value) throws IOException {
+ if ((value & ~0x7f) != 0) {
+ if ((value & ~0x3fff) != 0) {
+ if ((~0x1fffff & value) != 0) {
+ if ((~0xfffffff & value) != 0) {
+ dos.write(value >>> 28 | 0x80);
+ }
+ dos.write(value >>> 21 | 0x80);
+ }
+ dos.write(value >>> 14 | 0x80);
+ }
+ dos.write(value >>> 7 | 0x80);
+ }
+ dos.write(0x7f & value);
+ }
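+
+ // Illustrative example (not part of the original patch): this is the
+ // standard MIDI variable-length quantity, seven bits per byte with the high
+ // bit set on every byte but the last, most significant group first; e.g.
+ // putVariableInt(dos, 200) writes the two bytes 0x81, 0x48.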
+
+}
\ No newline at end of file
diff --git a/Tools/Cache Editor/src/valkyrion/PackerGUI.java b/Tools/Cache Editor/src/valkyrion/PackerGUI.java
new file mode 100644
index 000000000..bfe37aeab
--- /dev/null
+++ b/Tools/Cache Editor/src/valkyrion/PackerGUI.java
@@ -0,0 +1,115 @@
+package valkyrion;
+
+import java.awt.EventQueue;
+import java.awt.event.ActionEvent;
+import java.awt.event.ActionListener;
+
+import javax.swing.JButton;
+import javax.swing.JDialog;
+import javax.swing.JFileChooser;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JTextField;
+import javax.swing.UIManager;
+import javax.swing.UnsupportedLookAndFeelException;
+import javax.swing.border.EmptyBorder;
+import javax.swing.filechooser.FileNameExtensionFilter;
+
+import org.jvnet.substance.skin.SubstanceRavenGraphiteLookAndFeel;
+
+@SuppressWarnings("serial")
+public class PackerGUI extends JFrame {
+
+ public JPanel contentPane;
+ public JTextField input = new JTextField();
+ public JTextField cacheDir = new JTextField();
+ public JTextField musicId = new JTextField();
+ public JLabel lblInput = new JLabel("MIDI location:");
+ public JLabel lblCacheDir = new JLabel("Cache location:");
+ public JLabel lblMusicId = new JLabel("Music Id (has to be an int, e.g. 0 is login music):");
+ public JFileChooser filePicker = new JFileChooser();
+ public JFileChooser filePicker2 = new JFileChooser();
+ public JButton btnFilePick, btnFilePick2, btnPack;
+
+ public PackerGUI() {
+ setTitle("Music Packer/Replacer");
+ setSize(450, 400);
+ setLocationRelativeTo(null);
+ setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+ contentPane = new JPanel();
+ contentPane.setBorder(new EmptyBorder(5, 5, 5, 5));
+ setContentPane(contentPane);
+ contentPane.setLayout(null);
+
+ btnFilePick = new JButton("Browse");
+ btnFilePick.setBounds(330, 50, 90, 25);
+ btnFilePick2 = new JButton("Browse");
+ btnFilePick2.setBounds(330, 120, 90, 25);
+ btnPack = new JButton("Pack to Cache");
+ btnPack.setBounds(140, 240, 120, 60);
+ input.setBounds(25, 50, 300, 30);
+ cacheDir.setBounds(25, 120, 300, 30);
+ musicId.setBounds(25, 190, 300, 30);
+ lblCacheDir.setBounds(25, 90, 300, 30);
+ lblInput.setBounds(25, 15, 300, 30);
+ lblMusicId.setBounds(25, 160, 300, 30);
+ contentPane.add(lblCacheDir);
+ contentPane.add(cacheDir);
+ contentPane.add(lblMusicId);
+ contentPane.add(lblInput);
+ contentPane.add(btnFilePick);
+ contentPane.add(btnFilePick2);
+ contentPane.add(musicId);
+ contentPane.add(btnPack);
+ contentPane.add(input);
+ filePicker.setFileFilter(new FileNameExtensionFilter("MIDI Files", "mid"));
+
+ btnFilePick.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ if (filePicker.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) {
+ input.setText(filePicker.getSelectedFile().getAbsolutePath());
+ }
+ }
+ });
+ btnFilePick2.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ filePicker2.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
+ if (filePicker2.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) {
+ cacheDir.setText(filePicker2.getSelectedFile().getAbsolutePath());
+ }
+ }
+ });
+
+ btnPack.addActionListener(new ActionListener() {
+ public void actionPerformed(ActionEvent e) {
+ try {
+ CachePacker.convertMidi(input.getText(), System.getProperty("user.home") + "/tempout");
+ CachePacker.replaceMidi(cacheDir.getText() + "/", Integer.parseInt(musicId.getText()), 0, System.getProperty("user.home") + "/tempout");
+ } catch (Exception e1) {
+ e1.printStackTrace();
+ }
+ }
+ });
+ }
+
+ public static void main(String[] args) {
+ JFrame.setDefaultLookAndFeelDecorated(true);
+ JDialog.setDefaultLookAndFeelDecorated(true);
+
+ EventQueue.invokeLater(new Runnable() {
+
+ public void run() {
+ try {
+ UIManager.setLookAndFeel(new SubstanceRavenGraphiteLookAndFeel());
+ } catch (UnsupportedLookAndFeelException e) {
+ e.printStackTrace();
+ }
+ new PackerGUI().setVisible(true);
+ }
+
+ });
+ }
+
+}
diff --git a/Tools/Cache Editor/substance-5.3.jar b/Tools/Cache Editor/substance-5.3.jar
new file mode 100644
index 000000000..3c6359aa2
Binary files /dev/null and b/Tools/Cache Editor/substance-5.3.jar differ