Showing 6 changed files with 373 additions and 0 deletions.
.idea/vcs.xml
0 → 100644
biologicalTermTagging.py
0 → 100644
This diff is collapsed. Click to expand it.
nlp-preprocessing-pipeline.sh
0 → 100644
#!/bin/sh
# NLP preprocessing pipeline for the LRegulonDB corpus.
# Stages (each gated by a TRUE/FALSE flag below):
#   1. PRE   - preprocessing + term detection
#   2. POS   - Stanford POS tagging
#   3. LEMMA - BioLemmatizer lemmatization
#   4. TERM  - terminological (biological term) tagging
#   5. TRANS - transformation to word|lemma|pos format
echo 'Preprocessing files...'
ORIGINAL_CORPUS_PATH=/export/space1/users/compu2/bionlp/conditional-random-fields/data-sets/original
CORPUS_PATH=/export/space1/users/compu2/bionlp/conditional-random-fields/data-sets
TERM_PATH=/export/space1/users/compu2/bionlp/conditional-random-fields/dictionaries

# Stage toggles: set to TRUE to run the stage, anything else to skip it.
PRE=TRUE
echo " Preprocessing: $PRE"
POS=TRUE
echo " POS Tagging: $POS"
LEMMA=TRUE
echo " Lemmatization: $LEMMA"
TERM=TRUE
echo " Terminological tagging: $TERM"
TRANS=TRUE
echo " Transformation: $TRANS"

if [ "$PRE" = "TRUE" ]; then
    echo "Preprocessing..."
    INPUT_PATH="$ORIGINAL_CORPUS_PATH"
    OUTPUT_PATH="$CORPUS_PATH/preprocessed"
    # Quoted expansions so paths containing spaces or glob characters survive word splitting.
    python3.4 preprocessingTermDetection.py --inputPath "$INPUT_PATH" --outputPath "$OUTPUT_PATH" --termDetection --termPath "$TERM_PATH" --termFiles termFilesLength_LREGULONDB.json > outputPreprocessing_lregulondb.txt
    # python3.4 preprocessingTermDetection.py --inputPath "$INPUT_PATH" --outputPath "$OUTPUT_PATH" > outputPreprocessing_lregulondb.txt
fi

if [ "$POS" = "TRUE" ]; then
    echo "POS Tagging..."
    INPUT_PATH="$CORPUS_PATH/preprocessed"
    OUTPUT_PATH="$CORPUS_PATH/pos"
    python3.4 posTaggingStanford.py --inputPath "$INPUT_PATH" --outputPath "$OUTPUT_PATH" --taggerPath /home/cmendezc/STANFORD_POSTAGGER/stanford-postagger-2015-12-09 --biolemmatizer > outputPOST_lregulondb.txt
fi

if [ "$LEMMA" = "TRUE" ]; then
    echo "Lemmatization..."
    INPUT_PATH="$CORPUS_PATH/pos"
    OUTPUT_PATH="$CORPUS_PATH/lemma"
    python3.4 biolemmatizing.py --inputPath "$INPUT_PATH" --outputPath "$OUTPUT_PATH" --biolemmatizerPath /home/cmendezc/BIO_LEMMATIZER > outputLemma_lregulondb.txt
fi

if [ "$TERM" = "TRUE" ]; then
    echo "Terminological tagging..."
    INPUT_PATH="$CORPUS_PATH/lemma"
    OUTPUT_PATH="$CORPUS_PATH/term"
    python3.4 biologicalTermTagging.py --inputPath "$INPUT_PATH" --outputPath "$OUTPUT_PATH" --termPath "$TERM_PATH" --termFiles termFilesTag_LREGULONDB.json > outputTerm_lregulondb.txt
fi

if [ "$TRANS" = "TRUE" ]; then
    echo "Transformation..."
    INPUT_PATH="$CORPUS_PATH/term"
    OUTPUT_PATH="$CORPUS_PATH/transformed"
    python3.4 transforming.py --inputPath "$INPUT_PATH" --outputPath "$OUTPUT_PATH" --minWordsInLine 5 > outputTransformation_lregulondb.txt
fi
posTaggingStanford.py
0 → 100644
1 | +# -*- coding: UTF-8 -*- | ||
2 | + | ||
3 | +from optparse import OptionParser | ||
4 | +import os | ||
5 | +import sys | ||
6 | +from time import time | ||
7 | +from subprocess import call | ||
8 | + | ||
9 | +__author__ = 'CMendezC' | ||
10 | + | ||
11 | +# Objective: Part-of-Speech Tagging of several files with Stanford POS Tagger. | ||
12 | + | ||
13 | +# Parameters: | ||
14 | +# 1) --inputPath Path to read TXT files. | ||
15 | +# 2) --outputPath Path to place POST files. | ||
16 | +# 3) --taggerPath Path POS Tagger command. | ||
17 | +# 4) --biolemmatizer Format for biolemmatizer?. | ||
18 | + | ||
19 | +# Output: | ||
20 | +# 1) POS Tagged files. | ||
21 | +# 2) If --biolemmatizer with format: | ||
22 | +# Rob NNP | ||
23 | +# is VBZ | ||
24 | +# a DT | ||
25 | +# transcriptional JJ | ||
26 | +# dual JJ | ||
27 | +# regulator NN | ||
28 | +# . . | ||
29 | +# | ||
30 | +# Its PRP$ | ||
31 | +# N-terminal JJ ... | ||
32 | + | ||
33 | +# Execution: | ||
34 | +# GntR | ||
35 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT\ECK120012096_GntR\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT\ECK120012096_GntR\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
36 | + | ||
37 | +# FhlA | ||
38 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011394_FhlA\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011394_FhlA\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
39 | + | ||
40 | +# MarA | ||
41 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011412_MarA\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011412_MarA\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
42 | + | ||
43 | +# ArgR | ||
44 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011670_ArgR\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011670_ArgR\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
45 | + | ||
46 | +# CytR | ||
47 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120012407_CytR\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120012407_CytR\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
48 | + | ||
49 | +# Rob | ||
50 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011190_Rob\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011190_Rob\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
51 | + | ||
52 | +# EXTRACTING REGULATORY INTERACTIONS | ||
53 | +# python posTaggingStanford.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\EXTRACTING_REGULATORY_INTERACTIONS\corpus_ecoli\preprocessed --outputPath C:\Users\cmendezc\Documents\GENOMICAS\EXTRACTING_REGULATORY_INTERACTIONS\corpus_ecoli\post --taggerPath C:\Users\cmendezc\Documents\GENOMICAS\STANFORD_POSTAGGER\stanford-postagger-2015-12-09 --biolemmatizer | ||
54 | + | ||
55 | +########################################################### | ||
56 | +# MAIN PROGRAM # | ||
57 | +########################################################### | ||
58 | + | ||
if __name__ == "__main__":
    # POS-tag every file under --inputPath with the Stanford POS Tagger and,
    # optionally, reformat the output for BioLemmatizer (token<TAB>tag, one
    # token per line, blank line between sentences).
    import platform  # hoisted: the original re-imported this inside the per-file loop

    # Parameter definition
    parser = OptionParser()
    parser.add_option("-i", "--inputPath", dest="inputPath",
                      help="Path to read TXT files", metavar="PATH")
    parser.add_option("-o", "--outputPath", dest="outputPath",
                      help="Path to place POST files", metavar="PATH")
    parser.add_option("-a", "--taggerPath", dest="taggerPath", default="",
                      help="Path to Stanford POS Tagger installation", metavar="PATH")
    parser.add_option("-p", "--biolemmatizer", default=False,
                      action="store_true", dest="biolemmatizer",
                      help="Format for biolemmatizer?")

    (options, args) = parser.parse_args()
    if len(args) > 0:
        # parser.error() prints the message and exits (status 2); no extra
        # sys.exit() is needed afterwards.
        parser.error("None parameters indicated.")

    # Printing parameter values
    print('-------------------------------- PARAMETERS --------------------------------')
    print("Path to read input files: " + str(options.inputPath))
    print("Path to place output files: " + str(options.outputPath))
    print("Path POS Tagger command: " + str(options.taggerPath))
    print("Format for biolemmatizer?: " + str(options.biolemmatizer))

    filesTagged = 0
    t0 = time()
    print("Tagging corpus...")

    # The Java classpath separator is the only platform difference:
    # ':' on Linux, ';' on Windows. Compute it once instead of duplicating
    # the whole command in two branches.
    cpSep = ':' if platform.system() == 'Linux' else ';'

    # Walk directory to read files
    for path, dirs, files in os.walk(options.inputPath):
        # For each file in dir
        for file in files:
            print("   Tagging file..." + str(file))
            # Input files are *.pre.txt; tagged output is written as *.pos.txt.
            # Computed once here (the original rebuilt this path three times).
            outputFile = os.path.join(options.outputPath, file.replace('pre.txt', 'pos.txt'))
            try:
                # java -mx300m -cp 'stanford-postagger.jar<sep>lib/*'
                #   edu.stanford.nlp.tagger.maxent.MaxentTagger
                #   -model <models/english-left3words-distsim.tagger>
                #   -textFile <input> > <output>
                # NOTE: shell=True is required for the '>' redirection; paths
                # come from trusted command-line arguments, not untrusted input.
                command = "java -mx300m -cp " + \
                          os.path.join(options.taggerPath, 'stanford-postagger.jar') + cpSep + \
                          os.path.join(options.taggerPath, 'lib/*') + \
                          ' edu.stanford.nlp.tagger.maxent.MaxentTagger -model ' + \
                          os.path.join(options.taggerPath, 'models', 'english-left3words-distsim.tagger') + \
                          ' -textFile ' + os.path.join(path, file) + \
                          ' > ' + outputFile
                # Fix: join the walk's current directory (path) with the file
                # name; the original joined options.inputPath, which produced a
                # wrong path for files found in subdirectories.

                retcode = call(command, shell=True)
                if retcode < 0:
                    print("   Child was terminated by signal", -retcode, file=sys.stderr)
                else:
                    print("   Child returned", retcode, file=sys.stderr)
                filesTagged += 1
            except OSError as e:
                print("   Execution failed:", e, file=sys.stderr)

            if options.biolemmatizer:
                # Post-process the tagger output into BioLemmatizer format.
                with open(outputFile, "r", encoding="utf-8", errors="replace") as iFile:
                    text = iFile.read()
                # The Stanford tagger escapes brackets, e.g.:
                # -LRB-_-LRB- PTS_NN -RRB-_-RRB-
                # for_IN Mlc_NN inactivation_NN ._.
                text = text.replace('-LRB-', '(')
                text = text.replace('-RRB-', ')')

                text = text.replace('-LSB-', '[')
                text = text.replace('-RSB-', ']')

                # token_tag -> token<TAB>tag, one token per line, and a blank
                # line after each sentence-final period.
                text = text.replace('_', '\t')
                text = text.replace(' ', '\n')
                text = text.replace('.\n', '.\n\n')
                with open(outputFile, "w", encoding="utf-8", errors="replace") as oFile:
                    oFile.write(text)

    # Report processed files
    print()
    print("Files POS Tagged: " + str(filesTagged))
    print("Files POS Tagged in: %fs" % (time() - t0))
preprocessingTermDetection.py
0 → 100644
This diff is collapsed. Click to expand it.
transforming.py
0 → 100644
1 | +# -*- coding: UTF-8 -*- | ||
2 | +import re | ||
3 | +from optparse import OptionParser | ||
4 | +import os | ||
5 | +import sys | ||
6 | +from time import time | ||
7 | + | ||
8 | +__author__ = 'CMendezC' | ||
9 | + | ||
10 | +# Objective: Transforming BIOLemmatized files: | ||
11 | +# 1) Transformed files | ||
12 | +# 2) Text files to extract aspects | ||
13 | + | ||
14 | +# Parameters: | ||
15 | +# 1) --inputPath Path to read input files. | ||
16 | +# 2) --outputPath Path to place output files. | ||
17 | +# 3) --textPath Path to place output files. | ||
18 | +# 4) --minWordsInLine Minimum length sentence in number of words | ||
19 | +# 5) --classes Classes to indicate final of sentence when line contains: PMID\tNUMSENT\tSENT\tCLASS | ||
20 | + | ||
21 | +# Output: | ||
22 | +# 1) transformed files | ||
23 | +# 2) text files | ||
24 | + | ||
25 | +# Execution: | ||
26 | +# GntR | ||
27 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120012096_GntR\term --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120012096_GntR\transformed --minWordsInLine 5 | ||
28 | + | ||
29 | +# FhlA | ||
30 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011394_FhlA\term --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011394_FhlA\transformed --minWordsInLine 5 | ||
31 | + | ||
32 | +# MarA | ||
33 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011412_MarA\term --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011412_MarA\transformed --minWordsInLine 5 | ||
34 | + | ||
35 | +# ArgR | ||
36 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011670_ArgR\term --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011670_ArgR\transformed --minWordsInLine 5 | ||
37 | + | ||
38 | +# CytR | ||
39 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120012407_CytR\term --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120012407_CytR\transformed --minWordsInLine 5 | ||
40 | + | ||
41 | +# Rob | ||
42 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011190_Rob\term --outputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\TF_PMIDs_TXT_ECK120011190_Rob\transformed --minWordsInLine 5 | ||
43 | + | ||
44 | +# EXTRACTING REGULATORY INTERACTIONS | ||
45 | +# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\EXTRACTING_REGULATORY_INTERACTIONS\corpus_ecoli\lemma --outputPath C:\Users\cmendezc\Documents\GENOMICAS\EXTRACTING_REGULATORY_INTERACTIONS\corpus_ecoli\transformed --minWordsInLine 5 | ||
46 | + | ||
47 | + | ||
def length(listWords):
    # Measure a transformed sentence given as word|lemma|pos tokens.
    # Returns (word_count, char_count): word_count is the number of tokens
    # whose lemma field contains at least one ASCII letter (i.e. real words,
    # not punctuation), and char_count is the total surface-form length of
    # those same tokens.
    has_letter = re.compile('[a-zA-Z]')
    word_count = 0
    char_count = 0
    for token in listWords:
        surface, lemma = token.split('|')[0:2]
        if has_letter.search(lemma):
            word_count += 1
            char_count += len(surface)
    return word_count, char_count
58 | + | ||
59 | +########################################################### | ||
60 | +# MAIN PROGRAM # | ||
61 | +########################################################### | ||
62 | + | ||
if __name__ == "__main__":
    # Transform term-tagged files (token<TAB>pos<TAB>"lemma biopos ..." lines,
    # blank line between sentences) into one-sentence-per-line files of
    # word|lemma|pos tokens. Sentences shorter than --minWordsInLine words or
    # longer than 1000 characters are discarded.

    # Parameter definition
    parser = OptionParser()
    parser.add_option("-i", "--inputPath", dest="inputPath",
                      help="Path to read input files", metavar="PATH")
    parser.add_option("-o", "--outputPath", dest="outputPath",
                      help="Path to place transformed files", metavar="PATH")
    parser.add_option("--minWordsInLine", type="int", dest="minWordsInLine", default=3,
                      help="Minimum length sentence in number of words", metavar="NUM")
    parser.add_option("--classes", dest="classes",
                      help="Classes to indicate final of sentence when line contains: PMID-NUMSENT-SENT-CLASS", metavar="CLASS,CLASS")

    (options, args) = parser.parse_args()

    if len(args) > 0:
        parser.error("None parameters indicated.")
        sys.exit(1)  # NOTE(review): unreachable — parser.error() already exits

    # Printing parameter values
    print('-------------------------------- PARAMETERS --------------------------------')
    print("Path to read input files: " + str(options.inputPath))
    print("Path to place transformed files: " + str(options.outputPath))
    print("Minimum length sentence in number of words: " + str(options.minWordsInLine))
    print("Classes to indicate final of sentence: " + str(options.classes))

    # We realized that POS tags from Biolemmatizer are very specific, therefore we decided to use Standford tags
    bioPOST = False
    filesProcessed = 0
    # minWordsInLine = 3
    if not options.classes is None:
        listClasses = options.classes.split(',')
    t0 = time()
    print("Transforming files...")
    # Walk directory to read files
    for path, dirs, files in os.walk(options.inputPath):
        # For each file in dir
        for file in files:
            print("   Transforming file..." + str(file))
            # Expected input line format (tab-separated), e.g.:
            # TrpR    NN    TrpR NN PennPOS
            # ,       ,     , , NUPOS
            # tryptophan    NN    tryptophan NN PennPOS
            listLine1 = []
            listLine2 = []
            text = ''
            lemma = ''
            pos = ''
            textTransformed = ''   # accumulates word|lemma|pos tokens for the current sentence
            textText = ''          # NOTE(review): accumulated but never written anywhere visible — appears dead
            with open(os.path.join(path, file), "r", encoding="utf-8", errors="replace") as iFile:
                # Create output file to write (*.term.txt -> *.tra.txt)
                with open(os.path.join(options.outputPath, file.replace('term.txt', 'tra.txt')), "w", encoding="utf-8") as transformedFile:
                    for line in iFile:
                        if line == '\n':
                            # Blank line = sentence boundary (only honored when
                            # --classes is not used; otherwise the class token
                            # itself marks the end of a sentence below).
                            if options.classes is None:
                                # Keep sentences with more than minWordsInLine
                                # words and at most 1000 surface characters.
                                if length(textTransformed.split())[0] > options.minWordsInLine and length(textTransformed.split())[1] <= 1000:
                                    transformedFile.write(textTransformed + '\n')
                                textTransformed = ''
                                textText = ''
                            else:
                                continue
                        else:
                            line = line.strip('\n')
                            #print('Line ' + str(line.encode(encoding='UTF-8', errors='replace')))
                            listLine1 = line.split('\t')
                            # Skip malformed lines that do not have exactly
                            # token / POS / "lemma biopos" fields.
                            if len(listLine1) != 3:
                                continue
                            text = listLine1[0]
                            # Replacing a strange space character
                            text = text.replace(' ', '-')
                            listLine2 = listLine1[2].split(' ')
                            lemma = listLine2[0]
                            # Replacing a strange space character
                            lemma = lemma.replace(' ', '-')
                            if bioPOST:
                                # BioLemmatizer POS (second field of column 3)
                                pos = listLine2[1]
                                #print('Line ' + str(line.encode(encoding='UTF-8', errors='replace')))
                            else:
                                # Stanford POS (column 2)
                                pos = listLine1[1]
                            textText = textText + text + ' '
                            textTransformed = textTransformed + text + '|' + lemma + '|' + pos + ' '
                            # When --classes is given, a class token such as
                            # RI+GC  NN  RI+GC NN PennPOS
                            # marks the end of a sentence.
                            if not options.classes is None:
                                if text in listClasses:
                                    # if length(textTransformed.split()) > options.minWordsInLine:
                                    if length(textTransformed.split())[0] > options.minWordsInLine and length(textTransformed.split())[1] <= 1000:
                                        transformedFile.write(textTransformed + '\n')
                                        # print(textTransformed)
                                    textTransformed = ''
                                    textText = ''
            filesProcessed += 1

    # Print processed files
    print()
    print("Files processed: " + str(filesProcessed))
    print("In: %fs" % (time() - t0))
-
Please register or login to post a comment