# parsedocx.py
  1. #-*-coding:utf-8 -*-
  2. import re,os
  3. import json,uuid
  4. from bs4 import BeautifulSoup
  5. from win32com.client import Dispatch
  6. import pythoncom
  7. from upload_to_oss import TedOSS
  8. import threading
  9. import shutil
  10. class DocxConverter(object):
  11. """
  12. """
  13. def __init__(self,docpath="test4.docx"):
  14. """
  15. """
  16. self.docpath = docpath
  17. self.oss = TedOSS()
  18. def upload_imgfiles(self,uuidhtml):
  19. """
  20. """
  21. imgroot = os.path.join(os.path.dirname(self.docpath))
  22. imgdir = os.path.join(imgroot,uuidhtml+".files")
  23. if os.path.exists(imgdir):
  24. for file in os.listdir(imgdir):
  25. imgfile = os.path.join(imgdir,file)
  26. ossfile = uuidhtml+".files/"+file
  27. self.oss.upload_from_local(imgfile,ossfile)
  28. shutil.rmtree(imgdir)
  29. pythoncom.CoUninitialize()
  30. def docx2html(self):
  31. """
  32. """
  33. pythoncom.CoInitialize()
  34. self.word = Dispatch("Word.Application")
  35. self.word.Visible = 0
  36. self.doc = self.word.Documents.Open(self.docpath)
  37. self.uuidhtml = str(uuid.uuid4())
  38. html = os.path.join(os.path.dirname(self.docpath),self.uuidhtml+".html")
  39. self.doc.SaveAs(html,10)
  40. self.doc.Close()
  41. self.word.Quit()
  42. os.remove(self.docpath)
  43. #self.upload_imgfiles(self.uuidhtml)
  44. task = threading.Thread(target=self.upload_imgfiles,args=(self.uuidhtml,))
  45. task.start()
  46. return html
  47. class QuestionsParser(object):
  48. """试题解析
  49. """
  50. def __init__(self,name="test4.html"):
  51. self.html = open(name,"r").read()
  52. self.soup = BeautifulSoup(self.html,"html.parser")
  53. def get_paragraphs(self):
  54. """
  55. """
  56. wordsection = self.soup.find("div",class_="WordSection1")
  57. #print wordsection
  58. pars = wordsection.find_all("p")
  59. return pars
  60. def parse_questions(self):
  61. """提取试题
  62. """
  63. que_type_dct = {}
  64. paragraphs = self.get_paragraphs()
  65. for i,p in enumerate(paragraphs):
  66. print p.text
  67. if u"【题型】" in p.text:
  68. que_type_dct["type"] = p.text.split("、")[-1]
  69. def parse_questions(self):
  70. """提取试题
  71. """
  72. data = []
  73. tmp_val = {}
  74. tx_name = ""
  75. key = ""
  76. paragraphs = self.get_paragraphs()
  77. for i,p in enumerate(paragraphs):
  78. if u"【题型】" in p.text:
  79. tx_name = p.text
  80. if u"【题干】" in p.text:
  81. key = "tg"
  82. tmp_val["tx"] = tx_name
  83. if tmp_val.get("tg"):
  84. data.append(tmp_val)
  85. tmp_val = {"tg":"","tx":"","zsd":"","nd":"","da":"","jx":""}
  86. if u"【知识点】" in p.text:
  87. key = "zsd"
  88. if u"【难度】" in p.text:
  89. key = "nd"
  90. if u"【答案】" in p.text:
  91. key = "da"
  92. if u"【解析】" in p.text:
  93. key = "jx"
  94. if key != "":
  95. if "<img" in p.__str__():
  96. content = p.__str__()
  97. host = "http://scxjcclub.oss-cn-beijing.aliyuncs.com/say365/"
  98. src = re.search('src=".*\.files.*[\.jpg\.png]"',content).group().split("=")[-1].replace('"','')
  99. content = re.sub('src=".*\.files.*[\.jpg\.png]"','src="'+host+src+'"',content)
  100. tmp_val[key] += content
  101. else:
  102. tmp_val[key] += p.__str__()
  103. data.append(tmp_val)
  104. return data
  105. def get_questions(self):
  106. """
  107. """
  108. questions = self.parse_questions()
  109. for que in questions:
  110. que["tx"] = que["tx"].split(u"、")[-1]
  111. #que["tg"] = que["tg"].replace(u"【题干】","")
  112. #que["zsd"] = que["zsd"].replace(u"【知识点】","")
  113. #que["da"] = que["da"].replace(u"【答案】","")
  114. #que["jx"] = que["jx"].replace(u"【解析】","")
  115. que["qno"] = self.get_qno(que["tg"])
  116. return questions
  117. def get_qno(self,tg):
  118. """提取题号
  119. """
  120. tgsoup = BeautifulSoup(tg,"html.parser")
  121. tgtext = tgsoup.text
  122. qno = re.search(r"\d+",tgtext.split(u"、")[0]).group()
  123. return qno
  124. #docxconverter = DocxConverter()
  125. #questionparser = QuestionsParser()
  126. if __name__ == "__main__":
  127. #ques = questionparser.get_questions()
  128. #with open("t.json","w+") as f:
  129. # f.write(json.dumps(ques))
  130. #docxconverter.docx2html()
  131. pass