diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index f12fbcb3..e13e299d 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -6,7 +6,10 @@
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
- "dockerfile": "../Dockerfile"
+ "dockerfile": "../Dockerfile",
+ "args": {
+ "INSTALL_GIT": "true"
+ }
},
// Features to add to the dev container. More info: https://containers.dev/features.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..5ace4600
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,6 @@
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index d3f2789c..321f8233 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -5,9 +5,9 @@ jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Set up Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v5
with:
python-version: "3.x"
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 8aa6d189..c4dbdcfd 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -5,8 +5,8 @@ jobs:
tests:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
with:
python-version: |
3.10
@@ -14,7 +14,7 @@ jobs:
3.12
- name: Set up pip cache
if: runner.os == 'Linux'
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml') }}
diff --git a/.gitignore b/.gitignore
index 82f92755..7f0de2ba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+.vscode
+
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -160,3 +162,5 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
+src/.DS_Store
+.DS_Store
diff --git a/Dockerfile b/Dockerfile
index f9c0bef0..0072d9e3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,10 +2,15 @@ FROM python:3.13-slim-bullseye
USER root
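+# Optionally install git inside the image (the dev container enables this via the INSTALL_GIT build arg)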
+ARG INSTALL_GIT=false
+RUN if [ "$INSTALL_GIT" = "true" ]; then \
+ apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*; \
+ fi
+
# Runtime dependency
RUN apt-get update && apt-get install -y --no-install-recommends \
ffmpeg \
- && rm -rf /var/lib/apt/lists/*
+ && rm -rf /var/lib/apt/lists/*
RUN pip install markitdown
diff --git a/README.md b/README.md
index 75c2ba05..6bc91e6c 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,9 @@
# MarkItDown
[![PyPI](https://img.shields.io/pypi/v/markitdown.svg)](https://pypi.org/project/markitdown/)
+![PyPI - Downloads](https://img.shields.io/pypi/dd/markitdown)
+[![Built by AutoGen Team](https://img.shields.io/badge/Built%20by-AutoGen%20Team-blue)](https://github.com/microsoft/autogen)
+
MarkItDown is a utility for converting various files to Markdown (e.g., for indexing, text analysis, etc).
It supports:
@@ -24,6 +27,12 @@ To install MarkItDown, use pip: `pip install markitdown`. Alternatively, you can
markitdown path-to-file.pdf > document.md
```
+Or use `-o` to specify the output file:
+
+```bash
+markitdown path-to-file.pdf -o document.md
+```
+
You can also pipe content:
```bash
@@ -60,7 +69,43 @@ print(result.text_content)
docker build -t markitdown:latest .
docker run --rm -i markitdown:latest < ~/your-file.pdf > output.md
```
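+
+The `Dockerfile` also accepts an optional `INSTALL_GIT` build argument (it defaults to `false`; the dev container sets it to `true`). If you need `git` inside the image, a build along these lines should work:
+
+```bash
+docker build --build-arg INSTALL_GIT=true -t markitdown:latest .
+```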
+
+### Batch Processing Multiple Files
+
+This example shows how to convert multiple files to Markdown in a single run. The script processes all supported files in a directory and creates a corresponding Markdown file for each.
+
+```python
+from markitdown import MarkItDown
+from openai import OpenAI
+import os
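+
+# NOTE: the OpenAI client and llm_model are optional; MarkItDown uses them to generate
+# LLM-based descriptions for image files. Replace the placeholder API key with your own.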
+client = OpenAI(api_key="your-api-key-here")
+md = MarkItDown(llm_client=client, llm_model="gpt-4o-2024-11-20")
+supported_extensions = ('.pptx', '.docx', '.pdf', '.jpg', '.jpeg', '.png')
+files_to_convert = [f for f in os.listdir('.') if f.lower().endswith(supported_extensions)]
+for file in files_to_convert:
+ print(f"\nConverting {file}...")
+ try:
+ md_file = os.path.splitext(file)[0] + '.md'
+ result = md.convert(file)
+ with open(md_file, 'w') as f:
+ f.write(result.text_content)
+
+ print(f"Successfully converted {file} to {md_file}")
+ except Exception as e:
+ print(f"Error converting {file}: {str(e)}")
+
+print("\nAll conversions completed!")
+```
+
+To run it:
+
+1. Save the script above as `convert.py`.
+2. Place it in the same directory as the files you want to convert.
+3. Install the required packages (for example, `openai`).
+4. Run the script: `python convert.py`
+
+Note that the original files remain unchanged; new Markdown files are created with the same base name.
+
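+MarkItDown's `convert` method also accepts `pathlib.Path` objects in place of plain string paths, so a minimal sketch like this should work as well:
+
+```python
+from pathlib import Path
+
+from markitdown import MarkItDown
+
+md = MarkItDown()
+result = md.convert(Path("example.pdf"))
+print(result.text_content)
+```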
+
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
@@ -75,6 +120,20 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+### How to Contribute
+
+You can help by looking at issues or helping to review PRs. Any issue or PR is welcome, but we have also marked some as 'open for contribution' and 'open for reviewing' to help facilitate community contributions. These are, of course, just suggestions, and you are welcome to contribute in any way you like.
+
+| | All | Especially Needs Help from Community |
+|-----------------------|------------------------------------------|------------------------------------------------------------------------------------------|
+| **Issues** | [All Issues](https://github.com/microsoft/markitdown/issues) | [Issues open for contribution](https://github.com/microsoft/markitdown/issues?q=is%3Aissue+is%3Aopen+label%3A%22open+for+contribution%22) |
+| **PRs** | [All PRs](https://github.com/microsoft/markitdown/pulls) | [PRs open for reviewing](https://github.com/microsoft/markitdown/pulls?q=is%3Apr+is%3Aopen+label%3A%22open+for+reviewing%22) |
+
### Running Tests and Checks
- Install `hatch` in your environment and run tests:
diff --git a/pyproject.toml b/pyproject.toml
index c5bd58ba..9c113ade 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
[project]
name = "markitdown"
dynamic = ["version"]
-description = ''
+description = 'Utility tool for converting various files to Markdown'
readme = "README.md"
requires-python = ">=3.10"
license = "MIT"
@@ -32,9 +32,11 @@ dependencies = [
"python-pptx",
"pandas",
"openpyxl",
+ "xlrd",
"pdfminer.six",
"puremagic",
"pydub",
+ "olefile",
"youtube-transcript-api",
"SpeechRecognition",
"pathvalidate",
diff --git a/src/markitdown/__main__.py b/src/markitdown/__main__.py
index 2d531735..b6cf963b 100644
--- a/src/markitdown/__main__.py
+++ b/src/markitdown/__main__.py
@@ -1,45 +1,80 @@
# SPDX-FileCopyrightText: 2024-present Adam Fourney
#
# SPDX-License-Identifier: MIT
-import sys
import argparse
-from ._markitdown import MarkItDown
+import sys
+from textwrap import dedent
+from .__about__ import __version__
+from ._markitdown import MarkItDown, DocumentConverterResult
def main():
parser = argparse.ArgumentParser(
description="Convert various file formats to markdown.",
+ prog="markitdown",
formatter_class=argparse.RawDescriptionHelpFormatter,
- usage="""
-SYNTAX:
-
-    markitdown <OPTIONAL: FILENAME>
- If FILENAME is empty, markitdown reads from stdin.
+ usage=dedent(
+ """
+ SYNTAX:
-EXAMPLE:
-
- markitdown example.pdf
-
- OR
+                markitdown <OPTIONAL: FILENAME>
+                If FILENAME is empty, markitdown reads from stdin.
+
+ EXAMPLE:
+
+ markitdown example.pdf
+
+ OR
- cat example.pdf | markitdown
+ cat example.pdf | markitdown
- OR
+ OR
- markitdown < example.pdf
-""".strip(),
+ markitdown < example.pdf
+
+ OR to save to a file use
+
+ markitdown example.pdf -o example.md
+
+ OR
+
+ markitdown example.pdf > example.md
+ """
+ ).strip(),
+ )
+
+ parser.add_argument(
+ "-v",
+ "--version",
+ action="version",
+ version=f"%(prog)s {__version__}",
+ help="show the version number and exit",
)
parser.add_argument("filename", nargs="?")
+ parser.add_argument(
+ "-o",
+ "--output",
+ help="Output file name. If not provided, output is written to stdout.",
+ )
args = parser.parse_args()
if args.filename is None:
markitdown = MarkItDown()
result = markitdown.convert_stream(sys.stdin.buffer)
- print(result.text_content)
+ _handle_output(args, result)
else:
markitdown = MarkItDown()
result = markitdown.convert(args.filename)
+ _handle_output(args, result)
+
+
+def _handle_output(args, result: DocumentConverterResult):
+ """Handle output to stdout or file"""
+ if args.output:
+ with open(args.output, "w", encoding="utf-8") as f:
+ f.write(result.text_content)
+ else:
print(result.text_content)
diff --git a/src/markitdown/_markitdown.py b/src/markitdown/_markitdown.py
index 63b41309..4b8220f2 100644
--- a/src/markitdown/_markitdown.py
+++ b/src/markitdown/_markitdown.py
@@ -15,6 +15,7 @@
import zipfile
from xml.dom import minidom
from typing import Any, Dict, List, Optional, Union
+from pathlib import Path
from urllib.parse import parse_qs, quote, unquote, urlparse, urlunparse
from warnings import warn, resetwarnings, catch_warnings
from email import policy
@@ -23,6 +24,7 @@
import mammoth
import markdownify
+import olefile
import pandas as pd
import pdfminer
import pdfminer.high_level
@@ -35,6 +37,7 @@
from charset_normalizer import from_path
# Optional Transcription support
+IS_AUDIO_TRANSCRIPTION_CAPABLE = False
try:
# Using warnings' catch_warnings to catch
# pydub's warning of ffmpeg or avconv missing
@@ -173,7 +176,10 @@ def convert(
# Only accept text files
if content_type is None:
return None
- elif "text/" not in content_type.lower():
+ elif all(
+ not content_type.lower().startswith(type_prefix)
+ for type_prefix in ["text/", "application/json"]
+ ):
return None
text_content = str(from_path(local_path).best())
@@ -726,7 +732,31 @@ def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
if extension.lower() != ".xlsx":
return None
- sheets = pd.read_excel(local_path, sheet_name=None)
+ sheets = pd.read_excel(local_path, sheet_name=None, engine="openpyxl")
+ md_content = ""
+ for s in sheets:
+ md_content += f"## {s}\n"
+ html_content = sheets[s].to_html(index=False)
+ md_content += self._convert(html_content).text_content.strip() + "\n\n"
+
+ return DocumentConverterResult(
+ title=None,
+ text_content=md_content.strip(),
+ )
+
+
+class XlsConverter(HtmlConverter):
+ """
+ Converts XLS files to Markdown, with each sheet presented as a separate Markdown table.
+ """
+
+ def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
+ # Bail if not a XLS
+ extension = kwargs.get("file_extension", "")
+ if extension.lower() != ".xls":
+ return None
+
+ sheets = pd.read_excel(local_path, sheet_name=None, engine="xlrd")
md_content = ""
for s in sheets:
md_content += f"## {s}\n"
@@ -865,14 +895,25 @@ class MediaConverter(DocumentConverter):
Abstract class for multi-modal media (e.g., images and audio)
"""
- def _get_metadata(self, local_path):
- exiftool = shutil.which("exiftool")
- if not exiftool:
+ def _get_metadata(self, local_path, exiftool_path=None):
+ if not exiftool_path:
+ which_exiftool = shutil.which("exiftool")
+ if which_exiftool:
+ warn(
+                    f"""Implicit discovery of 'exiftool' is disabled. If you would like to continue to use exiftool in MarkItDown, please set the exiftool_path parameter in the MarkItDown constructor. E.g.,
+
+ md = MarkItDown(exiftool_path="{which_exiftool}")
+
+This warning will be removed in future releases.
+""",
+ DeprecationWarning,
+ )
+
return None
else:
try:
result = subprocess.run(
- [exiftool, "-json", local_path], capture_output=True, text=True
+ [exiftool_path, "-json", local_path], capture_output=True, text=True
).stdout
return json.loads(result)[0]
except Exception:
@@ -893,7 +934,7 @@ def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
md_content = ""
# Add metadata
- metadata = self._get_metadata(local_path)
+ metadata = self._get_metadata(local_path, kwargs.get("exiftool_path"))
if metadata:
for f in [
"Title",
@@ -948,7 +989,7 @@ def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
md_content = ""
# Add metadata
- metadata = self._get_metadata(local_path)
+ metadata = self._get_metadata(local_path, kwargs.get("exiftool_path"))
if metadata:
for f in [
"Title",
@@ -1009,7 +1050,7 @@ def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
md_content = ""
# Add metadata
- metadata = self._get_metadata(local_path)
+ metadata = self._get_metadata(local_path, kwargs.get("exiftool_path"))
if metadata:
for f in [
"ImageSize",
@@ -1078,6 +1119,79 @@ def _get_llm_description(self, local_path, extension, client, model, prompt=None
return response.choices[0].message.content
+class OutlookMsgConverter(DocumentConverter):
+ """Converts Outlook .msg files to markdown by extracting email metadata and content.
+
+ Uses the olefile package to parse the .msg file structure and extract:
+ - Email headers (From, To, Subject)
+ - Email body content
+ """
+
+ def convert(
+ self, local_path: str, **kwargs: Any
+ ) -> Union[None, DocumentConverterResult]:
+ # Bail if not a MSG file
+ extension = kwargs.get("file_extension", "")
+ if extension.lower() != ".msg":
+ return None
+
+ try:
+ msg = olefile.OleFileIO(local_path)
+ # Extract email metadata
+ md_content = "# Email Message\n\n"
+
+ # Get headers
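+            # The stream names encode MAPI property tags: 0C1F = sender email address,
+            # 0E04 = display-to, 0037 = subject; the 001F suffix marks a Unicode string.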
+ headers = {
+ "From": self._get_stream_data(msg, "__substg1.0_0C1F001F"),
+ "To": self._get_stream_data(msg, "__substg1.0_0E04001F"),
+ "Subject": self._get_stream_data(msg, "__substg1.0_0037001F"),
+ }
+
+ # Add headers to markdown
+ for key, value in headers.items():
+ if value:
+ md_content += f"**{key}:** {value}\n"
+
+ md_content += "\n## Content\n\n"
+
+ # Get email body
+ body = self._get_stream_data(msg, "__substg1.0_1000001F")
+ if body:
+ md_content += body
+
+ msg.close()
+
+ return DocumentConverterResult(
+ title=headers.get("Subject"), text_content=md_content.strip()
+ )
+
+ except Exception as e:
+ raise FileConversionException(
+ f"Could not convert MSG file '{local_path}': {str(e)}"
+ )
+
+ def _get_stream_data(
+ self, msg: olefile.OleFileIO, stream_path: str
+ ) -> Union[str, None]:
+ """Helper to safely extract and decode stream data from the MSG file."""
+ try:
+ if msg.exists(stream_path):
+ data = msg.openstream(stream_path).read()
+ # Try UTF-16 first (common for .msg files)
+ try:
+ return data.decode("utf-16-le").strip()
+ except UnicodeDecodeError:
+ # Fall back to UTF-8
+ try:
+ return data.decode("utf-8").strip()
+ except UnicodeDecodeError:
+ # Last resort - ignore errors
+ return data.decode("utf-8", errors="ignore").strip()
+ except Exception:
+ pass
+ return None
+
+
class EmlConverter(DocumentConverter):
"""Converts EML (email) files to Markdown. Preserves headers, body, and attachments info."""
@@ -1226,27 +1340,33 @@ def convert(
extracted_zip_folder_name = (
f"extracted_{os.path.basename(local_path).replace('.zip', '_zip')}"
)
- new_folder = os.path.normpath(
+ extraction_dir = os.path.normpath(
os.path.join(os.path.dirname(local_path), extracted_zip_folder_name)
)
md_content = f"Content from the zip file `{os.path.basename(local_path)}`:\n\n"
- # Safety check for path traversal
- if not new_folder.startswith(os.path.dirname(local_path)):
- return DocumentConverterResult(
- title=None, text_content=f"[ERROR] Invalid zip file path: {local_path}"
- )
-
try:
- # Extract the zip file
+ # Extract the zip file safely
with zipfile.ZipFile(local_path, "r") as zipObj:
- zipObj.extractall(path=new_folder)
+ # Safeguard against path traversal
+ for member in zipObj.namelist():
+ member_path = os.path.normpath(os.path.join(extraction_dir, member))
+ if (
+ not os.path.commonprefix([extraction_dir, member_path])
+ == extraction_dir
+ ):
+ raise ValueError(
+ f"Path traversal detected in zip file: {member}"
+ )
+
+ # Extract all files safely
+ zipObj.extractall(path=extraction_dir)
# Process each extracted file
- for root, dirs, files in os.walk(new_folder):
+ for root, dirs, files in os.walk(extraction_dir):
for name in files:
file_path = os.path.join(root, name)
- relative_path = os.path.relpath(file_path, new_folder)
+ relative_path = os.path.relpath(file_path, extraction_dir)
# Get file extension
_, file_extension = os.path.splitext(name)
@@ -1270,7 +1390,7 @@ def convert(
# Clean up extracted files if specified
if kwargs.get("cleanup_extracted", True):
- shutil.rmtree(new_folder)
+ shutil.rmtree(extraction_dir)
return DocumentConverterResult(title=None, text_content=md_content.strip())
@@ -1279,6 +1399,11 @@ def convert(
title=None,
text_content=f"[ERROR] Invalid or corrupted zip file: {local_path}",
)
+ except ValueError as ve:
+ return DocumentConverterResult(
+ title=None,
+ text_content=f"[ERROR] Security error in zip file {local_path}: {str(ve)}",
+ )
except Exception as e:
return DocumentConverterResult(
title=None,
@@ -1304,6 +1429,7 @@ def __init__(
llm_client: Optional[Any] = None,
llm_model: Optional[str] = None,
style_map: Optional[str] = None,
+ exiftool_path: Optional[str] = None,
# Deprecated
mlm_client: Optional[Any] = None,
mlm_model: Optional[str] = None,
@@ -1313,6 +1439,9 @@ def __init__(
else:
self._requests_session = requests_session
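+        # Fall back to the EXIFTOOL_PATH environment variable when no explicit path is given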
+ if exiftool_path is None:
+ exiftool_path = os.environ.get("EXIFTOOL_PATH")
+
# Handle deprecation notices
#############################
if mlm_client is not None:
@@ -1345,6 +1474,7 @@ def __init__(
self._llm_client = llm_client
self._llm_model = llm_model
self._style_map = style_map
+ self._exiftool_path = exiftool_path
self._page_converters: List[DocumentConverter] = []
@@ -1359,6 +1489,7 @@ def __init__(
self.register_page_converter(BingSerpConverter())
self.register_page_converter(DocxConverter())
self.register_page_converter(XlsxConverter())
+ self.register_page_converter(XlsConverter())
self.register_page_converter(PptxConverter())
self.register_page_converter(WavConverter())
self.register_page_converter(Mp3Converter())
@@ -1366,14 +1497,15 @@ def __init__(
self.register_page_converter(IpynbConverter())
self.register_page_converter(PdfConverter())
self.register_page_converter(ZipConverter())
+ self.register_page_converter(OutlookMsgConverter())
self.register_page_converter(EmlConverter())
def convert(
- self, source: Union[str, requests.Response], **kwargs: Any
+ self, source: Union[str, requests.Response, Path], **kwargs: Any
) -> DocumentConverterResult: # TODO: deal with kwargs
"""
Args:
- - source: can be a string representing a path or url, or a requests.response object
+        - source: can be a path (as a string or a pathlib.Path object), a URL, or a requests.Response object
- extension: specifies the file extension to use when interpreting the file. If None, infer from source (path, uri, content-type, etc.)
"""
@@ -1390,10 +1522,14 @@ def convert(
# Request response
elif isinstance(source, requests.Response):
return self.convert_response(source, **kwargs)
+ elif isinstance(source, Path):
+ return self.convert_local(source, **kwargs)
def convert_local(
- self, path: str, **kwargs: Any
+ self, path: Union[str, Path], **kwargs: Any
) -> DocumentConverterResult: # TODO: deal with kwargs
+ if isinstance(path, Path):
+ path = str(path)
# Prepare a list of extensions to try (in order of priority)
ext = kwargs.get("file_extension")
extensions = [ext] if ext is not None else []
@@ -1523,12 +1659,15 @@ def _convert(
if "llm_model" not in _kwargs and self._llm_model is not None:
_kwargs["llm_model"] = self._llm_model
- # Add the list of converters for nested processing
- _kwargs["_parent_converters"] = self._page_converters
-
if "style_map" not in _kwargs and self._style_map is not None:
_kwargs["style_map"] = self._style_map
+ if "exiftool_path" not in _kwargs and self._exiftool_path is not None:
+ _kwargs["exiftool_path"] = self._exiftool_path
+
+ # Add the list of converters for nested processing
+ _kwargs["_parent_converters"] = self._page_converters
+
# If we hit an error log it and keep trying
try:
res = converter.convert(local_path, **_kwargs)
@@ -1571,6 +1710,25 @@ def _guess_ext_magic(self, path):
# Use puremagic to guess
try:
guesses = puremagic.magic_file(path)
+
+ # Fix for: https://github.com/microsoft/markitdown/issues/222
+ # If there are no guesses, then try again after trimming leading ASCII whitespaces.
+ # ASCII whitespace characters are those byte values in the sequence b' \t\n\r\x0b\f'
+ # (space, tab, newline, carriage return, vertical tab, form feed).
+ if len(guesses) == 0:
+ with open(path, "rb") as file:
+ while True:
+ char = file.read(1)
+ if not char: # End of file
+ break
+ if not char.isspace():
+ file.seek(file.tell() - 1)
+ break
+ try:
+ guesses = puremagic.magic_stream(file)
+ except puremagic.main.PureError:
+ pass
+
extensions = list()
for g in guesses:
ext = g.extension.strip()
diff --git a/src/markitdown/py.typed b/src/markitdown/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_files/test.json b/tests/test_files/test.json
new file mode 100644
index 00000000..eba30594
--- /dev/null
+++ b/tests/test_files/test.json
@@ -0,0 +1,10 @@
+{
+ "key1": "string_value",
+ "key2": 1234,
+ "key3": [
+ "list_value1",
+ "list_value2"
+ ],
+ "5b64c88c-b3c3-4510-bcb8-da0b200602d8": "uuid_key",
+ "uuid_value": "9700dc99-6685-40b4-9a3a-5e406dcb37f3"
+}
diff --git a/tests/test_files/test.xls b/tests/test_files/test.xls
new file mode 100644
index 00000000..de4f368c
Binary files /dev/null and b/tests/test_files/test.xls differ
diff --git a/tests/test_files/test_outlook_msg.msg b/tests/test_files/test_outlook_msg.msg
new file mode 100644
index 00000000..05b087b7
Binary files /dev/null and b/tests/test_files/test_outlook_msg.msg differ
diff --git a/tests/test_markitdown.py b/tests/test_markitdown.py
index 7a7be55b..8be28162 100644
--- a/tests/test_markitdown.py
+++ b/tests/test_markitdown.py
@@ -54,6 +54,12 @@
"affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
]
+XLS_TEST_STRINGS = [
+ "## 09060124-b5e7-4717-9d07-3c046eb",
+ "6ff4173b-42a5-4784-9b19-f49caff4d93d",
+ "affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
+]
+
DOCX_TEST_STRINGS = [
"314b0a30-5b04-470b-b9f7-eed2c2bec74a",
"49e168b7-d2ae-407f-a055-2167576f39a1",
@@ -63,6 +69,15 @@
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
]
+MSG_TEST_STRINGS = [
+ "# Email Message",
+ "**From:** test.sender@example.com",
+ "**To:** test.recipient@example.com",
+ "**Subject:** Test Email Message",
+ "## Content",
+ "This is the body of the test email message",
+]
+
DOCX_COMMENT_TEST_STRINGS = [
"314b0a30-5b04-470b-b9f7-eed2c2bec74a",
"49e168b7-d2ae-407f-a055-2167576f39a1",
@@ -144,6 +159,22 @@
"5bda1dd6",
]
+JSON_TEST_STRINGS = [
+ "5b64c88c-b3c3-4510-bcb8-da0b200602d8",
+ "9700dc99-6685-40b4-9a3a-5e406dcb37f3",
+]
+
+
+# --- Helper Functions ---
+def validate_strings(result, expected_strings, exclude_strings=None):
+ """Validate presence or absence of specific strings."""
+ text_content = result.text_content.replace("\\", "")
+ for string in expected_strings:
+ assert string in text_content
+ if exclude_strings:
+ for string in exclude_strings:
+ assert string not in text_content
+
@pytest.mark.skipif(
skip_remote,
@@ -177,39 +208,35 @@ def test_markitdown_local() -> None:
# Test XLSX processing
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.xlsx"))
- for test_string in XLSX_TEST_STRINGS:
+ validate_strings(result, XLSX_TEST_STRINGS)
+
+ # Test XLS processing
+ result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.xls"))
+ for test_string in XLS_TEST_STRINGS:
text_content = result.text_content.replace("\\", "")
assert test_string in text_content
# Test DOCX processing
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.docx"))
- for test_string in DOCX_TEST_STRINGS:
- text_content = result.text_content.replace("\\", "")
- assert test_string in text_content
+ validate_strings(result, DOCX_TEST_STRINGS)
# Test DOCX processing, with comments
result = markitdown.convert(
os.path.join(TEST_FILES_DIR, "test_with_comment.docx"),
style_map="comment-reference => ",
)
- for test_string in DOCX_COMMENT_TEST_STRINGS:
- text_content = result.text_content.replace("\\", "")
- assert test_string in text_content
+ validate_strings(result, DOCX_COMMENT_TEST_STRINGS)
# Test DOCX processing, with comments and setting style_map on init
markitdown_with_style_map = MarkItDown(style_map="comment-reference => ")
result = markitdown_with_style_map.convert(
os.path.join(TEST_FILES_DIR, "test_with_comment.docx")
)
- for test_string in DOCX_COMMENT_TEST_STRINGS:
- text_content = result.text_content.replace("\\", "")
- assert test_string in text_content
+ validate_strings(result, DOCX_COMMENT_TEST_STRINGS)
# Test PPTX processing
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
- for test_string in PPTX_TEST_STRINGS:
- text_content = result.text_content.replace("\\", "")
- assert test_string in text_content
+ validate_strings(result, PPTX_TEST_STRINGS)
# Test EML processing
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.eml"))
@@ -222,35 +249,25 @@ def test_markitdown_local() -> None:
result = markitdown.convert(
os.path.join(TEST_FILES_DIR, "test_blog.html"), url=BLOG_TEST_URL
)
- for test_string in BLOG_TEST_STRINGS:
- text_content = result.text_content.replace("\\", "")
- assert test_string in text_content
+ validate_strings(result, BLOG_TEST_STRINGS)
# Test ZIP file processing
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_files.zip"))
- for test_string in DOCX_TEST_STRINGS:
- text_content = result.text_content.replace("\\", "")
- assert test_string in text_content
+ validate_strings(result, XLSX_TEST_STRINGS)
# Test Wikipedia processing
result = markitdown.convert(
os.path.join(TEST_FILES_DIR, "test_wikipedia.html"), url=WIKIPEDIA_TEST_URL
)
text_content = result.text_content.replace("\\", "")
- for test_string in WIKIPEDIA_TEST_EXCLUDES:
- assert test_string not in text_content
- for test_string in WIKIPEDIA_TEST_STRINGS:
- assert test_string in text_content
+ validate_strings(result, WIKIPEDIA_TEST_STRINGS, WIKIPEDIA_TEST_EXCLUDES)
# Test Bing processing
result = markitdown.convert(
os.path.join(TEST_FILES_DIR, "test_serp.html"), url=SERP_TEST_URL
)
text_content = result.text_content.replace("\\", "")
- for test_string in SERP_TEST_EXCLUDES:
- assert test_string not in text_content
- for test_string in SERP_TEST_STRINGS:
- assert test_string in text_content
+ validate_strings(result, SERP_TEST_STRINGS, SERP_TEST_EXCLUDES)
# Test RSS processing
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_rss.xml"))
@@ -260,9 +277,20 @@ def test_markitdown_local() -> None:
## Test non-UTF-8 encoding
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_mskanji.csv"))
- text_content = result.text_content.replace("\\", "")
- for test_string in CSV_CP932_TEST_STRINGS:
- assert test_string in text_content
+ validate_strings(result, CSV_CP932_TEST_STRINGS)
+
+ # Test MSG (Outlook email) processing
+ result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_outlook_msg.msg"))
+ validate_strings(result, MSG_TEST_STRINGS)
+
+ # Test JSON processing
+ result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.json"))
+ validate_strings(result, JSON_TEST_STRINGS)
+
+ # Test input with leading blank characters
+    input_data = b"   \n\n\n<html><body><h1>Test</h1></body></html>"
+ result = markitdown.convert_stream(io.BytesIO(input_data))
+ assert "# Test" in result.text_content
@pytest.mark.skipif(
@@ -270,9 +298,29 @@ def test_markitdown_local() -> None:
reason="do not run if exiftool is not installed",
)
def test_markitdown_exiftool() -> None:
- markitdown = MarkItDown()
+ # Test the automatic discovery of exiftool throws a warning
+ # and is disabled
+ try:
+ with catch_warnings(record=True) as w:
+ markitdown = MarkItDown()
+ result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
+ assert len(w) == 1
+ assert w[0].category is DeprecationWarning
+ assert result.text_content.strip() == ""
+ finally:
+ resetwarnings()
- # Test JPG metadata processing
+ # Test explicitly setting the location of exiftool
+ which_exiftool = shutil.which("exiftool")
+ markitdown = MarkItDown(exiftool_path=which_exiftool)
+ result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
+ for key in JPG_TEST_EXIFTOOL:
+ target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
+ assert target in result.text_content
+
+ # Test setting the exiftool path through an environment variable
+ os.environ["EXIFTOOL_PATH"] = which_exiftool
+ markitdown = MarkItDown()
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
for key in JPG_TEST_EXIFTOOL:
target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
@@ -334,8 +382,8 @@ def test_markitdown_llm() -> None:
if __name__ == "__main__":
"""Runs this file's tests from the command line."""
- test_markitdown_remote()
- test_markitdown_local()
+ # test_markitdown_remote()
+ # test_markitdown_local()
test_markitdown_exiftool()
- test_markitdown_deprecation()
- test_markitdown_llm()
+ # test_markitdown_deprecation()
+ # test_markitdown_llm()