简体   繁体   中英

python remove the last line from the written file with file.write

Hello I have opened file and I'm writing new lines to my file but in some cases I need to remove the last line witches written so how can I do?

ops = open("Ops.txt", "w")

for sepraded in sepradedObject :
        lastWord = ""
        for nameOfObject in sepraded:
            if(nameOfObject == "NN"):
                nn = lastWord 
                #here i need first remove the old one and than add this line
                ops.write(nnp + ";" + nn + ";" + predicate+";\n")
            if(nameOfObject == "NNP"):
                nnp = lastWord
                ops.write(nnp + ";" + subject + ";" + predicate+";\n")

            lastWord = nameOfObject

here the full of my code to know what happend and what I'm doing now (I don't care about resource or any other thing just I want the code works only ) so what can I do to remove the last line and added the new Line in ops file? :

# 1- Install nltk , with this command : pip install --user -U nltk And pip install --user -U numpy
# 2- Install nltk data , with the method : nltk.download() | its take over 3H with 8Mbps And 20 min with 40Mbps (over 2.9 GB)
# 3- Read src to get Sentences
# 4- Take loop to get each line of Sentense from the source
# 5- use command to get RDF library : pip install rdflib

# Import the nltk Library that Including above 3Gb Data
from nltk.tokenize import word_tokenize
import nltk
# Import RDF Library
import rdflib
# Just use once nltk.download() to get nlp(nltk) library data.
# nltk.download()
# r means Read
print("--------------Read the inputs from File-------------")
sentences = open("inputs.txt", "r")
print("--------------Read the inputs from File Completed-------------")
# w means write on file with remove all the content that file have
print("--------------Open File to Write The Tags-------------")
intoFileTags = open("srcTags.txt", "w")
for sentence in sentences:
    tokenizedSentence = word_tokenize(sentence)
    taggedSentence = nltk.pos_tag(tokenizedSentence)
    for tagWord in taggedSentence:
        i = 0;
        for word in tagWord:
            intoFileTags.write(word) if i == 1 else intoFileTags.write(word+"->")
            i = 1
        intoFileTags.write(";")
        i = 0
    intoFileTags.write("\n")
intoFileTags.close()
print("-----------Making The src Tag with word tokenize And split tags with pos tag Completed------")
print("----------------Close srcTag Completed-----------")
print("-----------Next Step Make RDF From Last Step We have---------------")

tagsFromFile = open("srcTags.txt", "r")
ops = open("Ops.txt", "w")
for line in tagsFromFile:
    sentenceWithTagsAndWord = line.split(";")
    subject = "";
    predicate = "";
    obj = "";
    afterVBZ = False;
    for tagAndWord in sentenceWithTagsAndWord:
        if tagAndWord == "\n":
            continue
        tw = tagAndWord.split("->")
        if tw[1] == "VBP" or tw[1] == "VBZ" or  tw[1] == "VBN":
            afterVBZ = True;
            subject = tw[0] + " ";
            continue
        if afterVBZ == True and tw[1] != "IN" :
            predicate += tw[0] + " ";
        if afterVBZ == False and (tw[1] != "VBZ" or tw[1] != "VBP") :
            obj += tw[0] + " ";
# Write the subject , object and predicate to Ops file -> now we are in step one!
#  Remove Withspeace and dot or question mark
    obj = obj.strip()
    subject = subject.strip()
    predicate = predicate.replace(".", "").replace("?", "").replace("!", "")
    predicate = predicate.strip()
    # so we have object , subject and predicate to normlaization for each object we have extra line to sepraded
    
    objSen = word_tokenize(obj)
    sepradedObject = nltk.pos_tag(objSen)
    nnp = ""
    nn = ""
    for sepraded in sepradedObject :
        lastWord = ""
        for nameOfObject in sepraded:
            if(nameOfObject == "NN"):
                nn = lastWord 
                #here i need first remove the old one and than add this line
                ops.write(nnp + ";" + nn + ";" + predicate+";\n")
            if(nameOfObject == "NNP"):
                nnp = lastWord
                ops.write(nnp + ";" + subject + ";" + predicate+";\n")

            lastWord = nameOfObject
ops.close()

print("-------------Write ops file for STEP ONE! Completed----------")
# create a Graph
graph = rdflib.Graph()
# Create schemas
# we use BNode because we don't know the relation
partOfSpeech = rdflib.Namespace("http://example.org/")
graph.bind("partOfSpeech", partOfSpeech)

# bob is subject , is a predicate , person is object
# Create the node like : bob = partOfSpeech['bob']



Right, so you can just compute the lines in memory, then write them out in a separate loop.

nnp = ""
nn = ""
lines = []
for sepraded in sepradedObject:
    lastWord = ""
    for nameOfObject in sepraded:
        if nameOfObject == "NN":
            nn = lastWord
            if lines:
                lines.pop(-1)  # Remove last line
            lines.append(nnp + ";" + nn + ";" + predicate + ";")
        if nameOfObject == "NNP":
            nnp = lastWord
            lines.append(nnp + ";" + subject + ";" + predicate + ";")
        lastWord = nameOfObject

for line in lines:
    print(line, file=ops)  # `print` takes care of newlines

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM