Over a million developers have joined DZone.
{{announcement.body}}
{{announcement.title}}

Parsing PDFs and Identifying Names Using Open NLP and Tika

DZone's Guide to

Parsing PDFs and Identifying Names Using Open NLP and Tika

Learn how to create a data processing pipeline with Java 8 to extract text from PDFs and then identify people's names.

· Big Data Zone ·
Free Resource

Hortonworks Sandbox for HDP and HDF is your chance to get started on learning, developing, testing and trying out new features. Each download comes preconfigured with interactive tutorials, sample data and developments from the Apache community.

To create a simple Java 8 application to extract text from PDFs and then identify people's names, I have created a simple data application. This can be used as part of a larger data processing pipeline or HDF flow by calling it via REST or the command line or converting it to a NiFi processor.

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.dataflowdeveloper</groupId>
  <artifactId>categorizer</artifactId>
  <packaging>jar</packaging>
  <version>1.0</version>
  <name>categorizer</name>

  <url>http://maven.apache.org</url>

  <dependencies>

    <!-- Testing -->
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>

    <!-- Logging -->
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-simple</artifactId>
      <version>1.7.7</version>
    </dependency>

    <!-- Natural-language processing (name finding / tokenization) -->
    <dependency>
      <groupId>org.apache.opennlp</groupId>
      <artifactId>opennlp-tools</artifactId>
      <version>1.7.0</version>
    </dependency>

    <!-- JSON serialization of results -->
    <dependency>
      <groupId>com.google.code.gson</groupId>
      <artifactId>gson</artifactId>
      <version>2.8.0</version>
    </dependency>

    <!-- Apache Tika: PDF text extraction -->
    <!-- https://mvnrepository.com/artifact/org.apache.tika/tika-core -->
    <dependency>
      <groupId>org.apache.tika</groupId>
      <artifactId>tika-core</artifactId>
      <version>1.14</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.tika/tika-parsers -->
    <dependency>
      <groupId>org.apache.tika</groupId>
      <artifactId>tika-parsers</artifactId>
      <version>1.14</version>
    </dependency>
    <dependency>
      <groupId>org.apache.tika</groupId>
      <artifactId>tika-langdetect</artifactId>
      <version>1.14</version>
    </dependency>

    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-core</artifactId>
      <version>3.5.0</version>
    </dependency>
    <dependency>
      <groupId>commons-io</groupId>
      <artifactId>commons-io</artifactId>
      <version>2.5</version>
    </dependency>

    <!-- Hadoop client stack (hadoop-common was previously declared twice;
         the duplicate declaration has been removed) -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.7.3</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.7.3</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.7.3</version>
    </dependency>
  </dependencies>
</project>

Java application:

package com.dataflowdeveloper;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import com.google.gson.Gson;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.TokenNameFinderModel;
import opennlp.tools.tokenize.SimpleTokenizer;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.Span;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.pdf.PDFParser;
import org.apache.tika.sax.BodyContentHandler;
import org.xml.sax.SAXException;

/**
 * Entry point: extracts the text of a sample PDF with Apache Tika's
 * PDFParser, then prints the people's names found in that text as JSON
 * (via {@link NameFinder}).
 *
 * Reads {@code <user.dir>/testdocs/opennlp.pdf}; any failure to open or
 * parse the PDF is logged and the program exits without running name
 * extraction (the original code continued with a null stream and threw
 * an NPE inside {@code parse}).
 */
public class App {

    public static void main(String[] args) {
        BodyContentHandler handler = new BodyContentHandler();
        Metadata metadata = new Metadata();
        ParseContext pcontext = new ParseContext();
        File pdf = new File(System.getProperty("user.dir") + "/testdocs/opennlp.pdf");

        // try-with-resources closes the stream (previously leaked), and the
        // early return prevents the NPE the old code hit when the file was
        // missing but parsing proceeded anyway.
        try (InputStream inputstream = new FileInputStream(pdf)) {
            // Parse the document using Tika's PDF parser; the handler
            // accumulates the extracted body text.
            new PDFParser().parse(inputstream, handler, metadata, pcontext);
        } catch (IOException | SAXException | TikaException e) {
            e.printStackTrace();
            return;
        }

        NameFinder nameFinder = new NameFinder();
        System.out.println(nameFinder.getPeople(handler.toString()));
    }
}

package com.dataflowdeveloper;


import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;


import com.google.gson.Gson;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.TokenNameFinderModel;
import opennlp.tools.tokenize.SimpleTokenizer;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.Span;

/**
 * Finds people's names in free text using OpenNLP's trained
 * person-name model, returning them as a JSON document of the form
 * {@code {"names":[{...PersonName...}, ...]}}.
 *
 * Model files are expected under {@code <user.dir>/input/}:
 * {@code en-ner-person.bin} (name-finder model) and
 * {@code en-token.bin} (tokenizer model).
 */
public class NameFinder {

    private static final String CURRENT_DIR = System.getProperty("user.dir");
    // Name-finder and tokenizer model locations. (A dead constant pointing at
    // a machine-specific absolute path was removed.)
    private static final String CURRENT_FILE = CURRENT_DIR + "/input/en-ner-person.bin";
    private static final String CURRENT_TOKEN_FILE = CURRENT_DIR + "/input/en-token.bin";

    /**
     * Extracts people's names from a sentence (or any block of text).
     *
     * @param sentence text to scan for person names
     * @return JSON string {@code {"names":[...]}}; the list is empty if the
     *         models cannot be loaded (the original code printed the
     *         exception and then dereferenced the null model, throwing NPE)
     */
    public String getPeople(String sentence) {
        TokenNameFinderModel model;
        Tokenizer tokenizer;

        // try-with-resources closes the tokenizer-model stream, which the
        // original version leaked. InvalidFormatException is a subclass of
        // IOException, so one catch covers both.
        try (InputStream tokenStream = new FileInputStream(new File(CURRENT_TOKEN_FILE))) {
            model = new TokenNameFinderModel(new File(CURRENT_FILE));
            tokenizer = new TokenizerME(new TokenizerModel(tokenStream));
        } catch (IOException e) {
            e.printStackTrace();
            // Fail soft with an empty result instead of a guaranteed NPE.
            return "{\"names\":[]}";
        }

        // Create a NameFinder using the model.
        NameFinderME finder = new NameFinderME(model);

        // Split the text into tokens, then locate name spans within them.
        String[] tokens = tokenizer.tokenize(sentence);
        Span[] nameSpans = finder.find(tokens);

        List<PersonName> people = new ArrayList<PersonName>();
        for (String name : Span.spansToStrings(nameSpans, tokens)) {
            people.add(new PersonName(name));
        }

        // Reset document-level adaptive data so successive calls are independent.
        finder.clearAdaptiveData();
        return "{\"names\":" + new Gson().toJson(people) + "}";
    }

}

Process

  1. We open the file stream for reading (this can be from HDFS, S3, or a regular file system).

  2. Then we use Apache Tika's PDF Parser to parse out the text. We also get the metadata for other processing.

  3. Using OpenNLP, we parse out all the names from that text.

  4. Using Google GSON, we then turn the names into JSON for easy usage.

References

  1. https://raw.githubusercontent.com/apache/tika/master/tika-example/pom.xml
  2. https://github.com/apache/tika/tree/master/tika-example

Hortonworks Community Connection (HCC) is an online collaboration destination for developers, DevOps engineers, customers, and partners to get answers to questions, collaborate on technical articles, and share code examples from GitHub. Join the discussion.

Topics:
big data, tutorial, tika, open nlp, parsing

Published at DZone with permission of

Opinions expressed by DZone contributors are their own.

{{ parent.title || parent.header.title}}

{{ parent.tldr }}

{{ parent.urlSource.name }}