
File not found / How to make Python recognize all the files in my folder?

I am an inexperienced programmer and I had to make a chatbot for a school project. I succeeded in making it, but I have a problem with the files I need to load.

Using the full file path works, but I can't possibly do that, since I have to send the project to my teacher for review and then present it at school. I need the script to find the files in its own folder.

I have all the files in one folder, everything is named correctly, and nothing is misspelled (pasting the full path in front of the file name fixes the issue, which rules out typos). It just keeps saying that it can't find the file. I found multiple questions about this issue, with some workarounds and some things that solved it for others, but not for me. How do I straight up tell my code: "Look here, idiot! It's in THE FOLDER THAT YOU ARE IN!"?
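What seems to be happening: a bare relative name like 'intentii.json' is resolved against the current working directory (wherever Python was launched from), not against the folder the script lives in, so the two can differ depending on how the script is started. A quick way to see both locations (a minimal check, assuming the code is run as a script file):

import os
from pathlib import Path

print("Python is looking in:", os.getcwd())                      # current working directory
print("The script lives in:", Path(__file__).absolute().parent)  # folder containing this file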

CHATBOT (the one I'm having issues with):

import random
import json
import pickle
import numpy as np

import nltk
from nltk.stem import WordNetLemmatizer

from keras.models import load_model

lemmatizer = WordNetLemmatizer()
intentii = json.loads(open('intentii.json').read())

cuvinte = pickle.load(open('cuvinte.pkl', 'rb'))
clase = pickle.load(open('clase.pkl', 'rb'))
model = load_model('chatbotmodel.h5')

def clean_up_sentence(sentence):
    sentence_cuvinte = nltk.word_tokenize(sentence)
    sentence_cuvinte = [lemmatizer.lemmatize(cuvant) for cuvant in sentence_cuvinte]
    return sentence_cuvinte

def pachet_de_cuvinte(sentence):
    sentence_cuvinte = clean_up_sentence(sentence)
    pachet = [0] * len(cuvinte)
    # mark each vocabulary word that appears in the sentence
    for w in sentence_cuvinte:
        for i, cuvant in enumerate(cuvinte):
            if cuvant == w:
                pachet[i] = 1
    return np.array(pachet)

def predict_class(sentence):
    bow = pachet_de_cuvinte(sentence)
    res = model.predict(np.array([bow]))[0]
    LIMITA_EROARE = 0.25
    rezultate = [[i, r] for i, r in enumerate(res) if r > LIMITA_EROARE]

    rezultate.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in rezultate:
        return_list.append({'intentie': clase[r[0]], 'probabilitate': str(r[1])})
    return return_list

def primeste_raspuns(lista_intentii, intentii_json):
    tag = lista_intentii[0]['intentie']
    lista_de_intentii = intentii_json['intentii']
    for i in lista_de_intentii:
        if i['tag'] == tag:
            result = random.choice(i['raspunsuri'])
            break
    return result

print("V.A.S.I.L.E a fost initializat!")

while True:
    message = input("")
    ints = predict_class(message)
    res = primeste_raspuns(ints, intentii)
    print(res)

TRAINING (the one where I load the files the same way and it works):

import random
import json
import pickle
import numpy as np

import nltk
from nltk.stem import WordNetLemmatizer

from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD

lematizare = WordNetLemmatizer()

intentii = json.loads(open('intentii.json').read())

cuvinte = []
clase = []
documente = []
ignora_simboluri = ['?', '!', ',', '.']

for intentie in intentii['intentii']:
    for pattern in intentie['patterns']:
        word_list = nltk.word_tokenize(pattern)
        cuvinte.extend(word_list)
        documente.append((word_list, intentie['tag']))
        if intentie['tag'] not in clase:
            clase.append(intentie['tag'])
        
cuvinte = [lematizare.lemmatize(cuvant) for cuvant in cuvinte if cuvant not in ignora_simboluri]
cuvinte = sorted(set(cuvinte))

clase = sorted(set(clase))

pickle.dump(cuvinte, open('cuvinte.pkl', 'wb'))
pickle.dump(clase, open('clase.pkl', 'wb'))

training = []
output_gol = [0] * len(clase)

for document in documente:
    pachet = []
    cuvant_patterns = document[0]
    cuvant_patterns = [lematizare.lemmatize(cuvant.lower()) for cuvant in cuvant_patterns]
    for cuvant in cuvinte:
        pachet.append(1 if cuvant in cuvant_patterns else 0)
    
    output_row = list(output_gol)
    output_row[clase.index(document[1])] = 1
    training.append([pachet, output_row])

random.shuffle(training)
training = np.array(training, dtype=object)  # rows hold lists of different lengths

train_x = list(training[:, 0])
train_y = list(training[:, 1])

model = Sequential()
model.add(Dense(512, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbotmodel.h5')
print("Gata!")

Not exactly an answer to your question, but I think you're asking about ./filename — and if you're using Windows, it's .\ (.\filename).
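For what it's worth, the ./ prefix only makes the relative path explicit; Python on Windows accepts forward slashes too, and the path is still resolved against the current working directory, so this alone won't help if the script is launched from somewhere else:

import json

# Equivalent to open('intentii.json'): both are resolved against
# the current working directory, on Windows as well as Linux/macOS.
intentii = json.loads(open('./intentii.json').read())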

To get paths relative to the script itself, use __file__:

from pathlib import Path

this_file = Path(__file__).absolute()
this_file_directory = this_file.parent

so if your structure is:

top_dir
├── code_dir
│   └── main.py
└── data
    └── something.json

in main.py

from pathlib import Path

top_dir = Path(__file__).absolute().parent.parent
data_dir = top_dir / 'data'
data_file = data_dir / 'something.json'

and this will work for anyone, no matter which directory they run the script from.
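Applied to the layout from the question, where every data file sits in the same folder as the script, the same idea would look like this (a sketch; older Keras versions may want the model path as a plain string, hence the str()):

from pathlib import Path
import json
import pickle

from keras.models import load_model

here = Path(__file__).absolute().parent  # folder containing this script

intentii = json.loads((here / 'intentii.json').read_text(encoding='utf-8'))
cuvinte = pickle.load(open(here / 'cuvinte.pkl', 'rb'))
clase = pickle.load(open(here / 'clase.pkl', 'rb'))
model = load_model(str(here / 'chatbotmodel.h5'))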
