| | from rank_bm25 import BM25Plus |
| | import os |
| | import sys |
| | import re |
| | from nltk.corpus import stopwords |
| | from nltk.stem import PorterStemmer, WordNetLemmatizer |
| |
|
| |
|
def read_corpus(corpus_files_path):
    """Read every file under *corpus_files_path* and return their contents.

    Files are read in sorted name order so the corpus — and therefore the
    BM25 document indices derived from it — is deterministic across
    platforms (os.listdir order is unspecified).

    Parameters:
        corpus_files_path: directory containing one corpus document per file.

    Returns:
        list[str]: the full text of each regular file, in sorted name order.
    """
    corpus = []
    for corpus_file in sorted(os.listdir(corpus_files_path)):
        file_path = os.path.join(corpus_files_path, corpus_file)
        # Skip anything that is not a regular file; open() on a
        # subdirectory would raise IsADirectoryError.
        if not os.path.isfile(file_path):
            continue
        with open(file_path, 'r') as input_file:
            corpus.append(input_file.read())
    return corpus
| |
|
def normalize_text(text):
    """Normalize free text into a list of processed tokens.

    Pipeline: lowercase, split into word/punctuation tokens, drop English
    stopwords, then Porter-stem and WordNet-lemmatize each surviving token.

    Returns:
        list[str]: the normalized tokens.
    """
    tokens = re.findall(r'\w+|[^\s\w]+', text.lower())

    english_stopwords = set(stopwords.words('english'))
    tokens = [tok for tok in tokens if tok not in english_stopwords]

    porter = PorterStemmer()
    wordnet = WordNetLemmatizer()
    # Stemming first, then lemmatizing, mirrors the original pipeline order.
    return [wordnet.lemmatize(porter.stem(tok)) for tok in tokens]
| |
|
| |
|
| | |
def tokenize_code(code):
    """Tokenize a source-code string by reusing the generic text normalizer."""
    tokens = normalize_text(code)
    return tokens
| |
|
def main():
    """Match each query function against a corpus of functions using BM25+.

    Command-line arguments: <project> <corpus_lang> <query_lang>.
    For every file in the query directory, scores it against every corpus
    document and writes the query plus its top-10 matches (wrapped in
    simple XML-ish tags) to one result file per query.
    """
    # Fail fast with a usage message instead of an IndexError.
    if len(sys.argv) < 4:
        sys.exit("usage: {} <project> <corpus_lang> <query_lang>".format(sys.argv[0]))

    project = sys.argv[1]
    corpus_lang = sys.argv[2]
    query_lang = sys.argv[3]

    corpus_files_path = os.path.join("functions", project, corpus_lang)
    query_files_path = os.path.join("functions_with_unitTest", project, query_lang)
    match_results_path = os.path.join(
        "potential_function_pair", project, f"{query_lang}__{corpus_lang}"
    )

    # Create the output directory once, up front. The original re-checked
    # existence inside the loop; exist_ok avoids the check-then-create race.
    os.makedirs(match_results_path, exist_ok=True)

    query_files = os.listdir(query_files_path)

    # Build the BM25+ index over the tokenized corpus once.
    corpus = read_corpus(corpus_files_path)
    tokenized_corpus = [tokenize_code(doc) for doc in corpus]
    bm25 = BM25Plus(tokenized_corpus)

    top_n = 10
    for query_file in query_files:
        with open(os.path.join(query_files_path, query_file), 'r') as input_file:
            query = input_file.read()

        # Score every corpus document against this query and keep the
        # indices of the top_n highest-scoring documents.
        scores = bm25.get_scores(tokenize_code(query))
        match_results_index = sorted(
            range(len(scores)), key=lambda i: scores[i], reverse=True
        )[:top_n]

        with open(os.path.join(match_results_path, query_file), 'w') as output_file:
            output_file.write("<Target function>\n")
            output_file.write(query)
            output_file.write("\n</Target function>\n\n")

            output_file.write("<Possible matching functions>\n")
            for rank, index in enumerate(match_results_index, start=1):
                output_file.write(
                    "<Function {}> \n{}\n</Function {}>\n\n".format(rank, corpus[index], rank)
                )
            output_file.write("</Possible matching functions>\n")
| | |
| |
|
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()