
Java - the method get(Class) is undefined for the type String

I receive the following error when running my code:

The method get(Class) is undefined for the type String.

I understand why I get the issue: the get() method cannot be called on a String. However, I was hoping someone could advise me on how to fix it. All advice is very much appreciated. The section of code where I get the issue is:

public static void averageSent(String review) 
  { 
        //populate sentence array with each sentence of the review
        String [] sentences = review.split("[!?.]+");

        //number of sentences in each review
        int review_sentences = review.split("[!?.]+").length;

        //array of sentiments for each review
        String sentiments[] = new String[review_sentences];

        //initialise total
        int total = 0;

        //populate sentiments array
        for (int i=0; i< review_sentences; i++)
            {
                sentiments[i] = sentences[i].get(SentimentCoreAnnotations.SentimentClass.class).toString(); // <-- error occurs here
            }

        //output sentiments array
        for (int i =0; i < sentiments.length; i++)
        {
            System.out.println("SENTIMENTS" +sentiments[i]);
        }

        //total up a score for the array
        for (String s: sentiments)
        {
            ...
        }
    }

The entire code is:

import java.io.*;
import java.util.*;

import edu.stanford.nlp.coref.CorefCoreAnnotations;

import edu.stanford.nlp.coref.data.CorefChain;
import edu.stanford.nlp.io.*;
import edu.stanford.nlp.io.EncodingPrintWriter.out;
import edu.stanford.nlp.ling.*;
import edu.stanford.nlp.pipeline.*;
import edu.stanford.nlp.pipeline.CoreNLPProtos.Sentiment;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations;
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
import edu.stanford.nlp.trees.*;
import edu.stanford.nlp.util.*;
import edu.stanford.nlp.sentiment.Evaluate;
import org.apache.commons.io.FileUtils;

/** This class demonstrates building and using a Stanford CoreNLP pipeline. */
public class sentimentMain {

  /** Usage: java -cp "*" StanfordCoreNlpDemo [inputFile [outputTextFile [outputXmlFile]]] */
  public static void main(String[] args) throws IOException {

      //ArrayList<String> Sentences = new ArrayList<String>();
      //ArrayList<String> sentence_sentiment = new ArrayList<String>();

    // set up optional output files
    PrintWriter out;
    if (args.length > 1) {
      out = new PrintWriter(args[1]);
    } else {
      out = new PrintWriter(System.out);
    }
    PrintWriter xmlOut = null;
    if (args.length > 2) {
      xmlOut = new PrintWriter(args[2]);
    }
    // Add in sentiment
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref, sentiment");

    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    File[] files = new File("C:/stanford-corenlp-full-2016-10-31/dataset").listFiles();

    String line = null;

    try{
        for (File file : files) {
            if (file.exists()) {
                BufferedReader in = new BufferedReader(new FileReader(file));
                String str = FileUtils.readFileToString(file);
                while((line = in.readLine()) != null)
                {
                    Annotation document = new Annotation(line);

                    // run all the selected Annotators on this text
                    pipeline.annotate(document);

                    // this prints out the results of sentence analysis to file(s) in good formats
                    pipeline.prettyPrint(document, out);
                    if (xmlOut != null) {
                      pipeline.xmlPrint(document, xmlOut);
                    }

                    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
                    if (sentences != null && ! sentences.isEmpty()) {
                      CoreMap sentence = sentences.get(0);
                      for (CoreMap token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                      }
                      Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
                      tree.pennPrint(out);
                      SemanticGraph graph = sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);   
                      Map<Integer, CorefChain> corefChains =
                          document.get(CorefCoreAnnotations.CorefChainAnnotation.class);
                      if (corefChains == null) { return; }
                      for (Map.Entry<Integer,CorefChain> entry: corefChains.entrySet()) {
                        for (CorefChain.CorefMention m : entry.getValue().getMentionsInTextualOrder()) {
                          // We need to subtract one since the indices count from 1 but the Lists start from 0
                          List<CoreLabel> tokens = sentences.get(m.sentNum - 1).get(CoreAnnotations.TokensAnnotation.class);
                        }
                      }
                      out.println("The first sentence overall sentiment rating is " + sentence.get(SentimentCoreAnnotations.SentimentClass.class));

                    }
                    }

                      //Sentences.forEach(s -> System.out.println(s));
                      //sentence_sentiment.forEach(s -> System.out.println(s));
                      averageSent(str); 
                in.close();
            } else {
                System.out.println("File: " + file.getName() + file.toString());
            }
        }
    }catch(NullPointerException e){
        e.printStackTrace();
    }
    IOUtils.closeIgnoringExceptions(out);
    IOUtils.closeIgnoringExceptions(xmlOut);
  }

  public static void averageSent(String review) 
  { 
        //populate sentence array with each sentence of the review
        String [] sentences = review.split("[!?.]+");

        //number of sentences in each review
        int review_sentences = review.split("[!?.]+").length;

        //array of sentiments for each review
        String sentiments[] = new String[review_sentences];

        //initialise total
        int total = 0;

        //populate sentiments array
        for (int i=0; i< review_sentences; i++)
            {
                sentiments[i]=sentences[i].get(SentimentCoreAnnotations.SentimentClass.class).toString();
            }

        //output sentiments array
        for (int i =0; i < sentiments.length; i++)
        {
            System.out.println("SENTIMENTS" +sentiments[i]);
        }

        //total up a score for the array
        for (String s: sentiments)
        {

            if (s.equals("Very positive"))
            {
                int veryPos = 4;
                total += veryPos;
            }
            else if (s.equals("Positive"))
            {
                int pos = 3;
                total += pos;
            }
            else if (s.equals("Negative"))
            {
                int neg = 2;
                total += neg;
            }
            else if (s.equals("Very negative"))
            {
                int veryNeg = 1;
                total += veryNeg;
            }
            else if (s.equals("Neutral"))
            {
                int neu = 0;
                total += neu;
            }

            //System.out.println("Total " +total);
    }

  }


}

The main code used is the Stanford CoreNLP sentiment analysis model. However, as this model can only give me the sentiment of individual sentences within a movie review, I have written this method to get an average sentiment for an entire review. The get() method returns the sentiment of a sentence. It works when the object is a CoreMap, but I need to get the sentiment of a String so that I can populate a String array with the sentiments for each movie review.
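
One way to avoid the error, sketched below rather than taken from the post, is to hand averageSent the annotated CoreMap sentences instead of the raw review string, since CoreMap (unlike String) actually has a get(Class) method. The class and signature here are hypothetical:

import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;
import java.util.List;

public class AverageSentSketch {

  // Sketch only: takes the already-annotated sentences, not the raw review text.
  public static void averageSent(List<CoreMap> sentences) {
    String[] sentiments = new String[sentences.size()];

    // CoreMap supports get(Class), so this compiles where the String version does not
    for (int i = 0; i < sentences.size(); i++) {
      sentiments[i] = sentences.get(i).get(SentimentCoreAnnotations.SentimentClass.class);
    }

    // output sentiments array
    for (String s : sentiments) {
      System.out.println("SENTIMENT " + s);
    }
  }
}

In main, the List<CoreMap> already obtained from document.get(CoreAnnotations.SentencesAnnotation.class) could then be passed to averageSent in place of str.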

Here is some sample code for going through the sentiment of each sentence and counting the "Positive", "Negative", and "Neutral" sentences.

package edu.stanford.nlp.examples;

import edu.stanford.nlp.ling.*;
import edu.stanford.nlp.pipeline.*;
import edu.stanford.nlp.sentiment.*;
import edu.stanford.nlp.util.*;
import java.util.Properties;

public class AverageDocumentSentiment {

  public static void main(String[] args) {
    Annotation document =
        new Annotation("...movie review text...");
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse,sentiment");
    props.setProperty("parse.binaryTrees","true");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    pipeline.annotate(document);
    int totalNegative = 0;
    int totalPositive = 0;
    int totalNeutral = 0;
    int total = 0;
    for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
      String sentenceSentiment = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
      System.out.println(sentenceSentiment);
      if (sentenceSentiment.equals("Negative")) {
        totalNegative++;
      } else if (sentenceSentiment.equals("Positive")) {
        totalPositive++;
      } else {
        totalNeutral++;
      }
      total++ ;
    }
  }
}
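
If what is needed is the numeric average the question describes, the same loop can be extended with the 0-4 scoring scheme from the question's averageSent method. The following is a sketch along those lines, not part of the original answer:

package edu.stanford.nlp.examples;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;
import java.util.Properties;

public class AverageReviewScore {

  // Map a sentiment label onto the 0-4 scale used in the question's averageSent method.
  static int score(String sentiment) {
    switch (sentiment) {
      case "Very positive": return 4;
      case "Positive":      return 3;
      case "Negative":      return 2;
      case "Very negative": return 1;
      default:              return 0;  // "Neutral" or anything unexpected
    }
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse,sentiment");
    props.setProperty("parse.binaryTrees", "true");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    Annotation document = new Annotation("...movie review text...");
    pipeline.annotate(document);

    int total = 0;
    int count = 0;
    for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
      total += score(sentence.get(SentimentCoreAnnotations.SentimentClass.class));
      count++;
    }
    if (count > 0) {
      System.out.println("Average sentiment score for the review: " + ((double) total / count));
    }
  }
}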
