survey_user_input.py 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785
  1. # -*- coding: utf-8 -*-
  2. # Part of Odoo. See LICENSE file for full copyright and licensing details.
  3. import logging
  4. import textwrap
  5. import uuid
  6. from dateutil.relativedelta import relativedelta
  7. from odoo import api, fields, models, _
  8. from odoo.exceptions import ValidationError
  9. from odoo.tools import float_is_zero
  10. _logger = logging.getLogger(__name__)
class SurveyUserInput(models.Model):
    """ Metadata for a set of one user's answers to a particular survey """
    _name = "survey.user_input"
    _description = "Survey User Input"
    _rec_name = "survey_id"
    _order = "create_date desc"
    _inherit = ['mail.thread', 'mail.activity.mixin']

    # answer description
    survey_id = fields.Many2one('survey.survey', string='Survey', required=True, readonly=True, ondelete='cascade')
    scoring_type = fields.Selection(string="Scoring", related="survey_id.scoring_type")
    # start/end are set by _mark_in_progress / _mark_done respectively
    start_datetime = fields.Datetime('Start date and time', readonly=True)
    end_datetime = fields.Datetime('End date and time', readonly=True)
    deadline = fields.Datetime('Deadline', help="Datetime until customer can open the survey and submit answers")
    state = fields.Selection([
        ('new', 'Not started yet'),
        ('in_progress', 'In Progress'),
        ('done', 'Completed')], string='Status', default='new', readonly=True)
    test_entry = fields.Boolean(readonly=True)
    last_displayed_page_id = fields.Many2one('survey.question', string='Last displayed question/page')
    # attempts management
    is_attempts_limited = fields.Boolean("Limited number of attempts", related='survey_id.is_attempts_limited')
    attempts_limit = fields.Integer("Number of attempts", related='survey_id.attempts_limit')
    attempts_count = fields.Integer("Attempts Count", compute='_compute_attempts_info')
    attempts_number = fields.Integer("Attempt n°", compute='_compute_attempts_info')
    survey_time_limit_reached = fields.Boolean("Survey Time Limit Reached", compute='_compute_survey_time_limit_reached')
    # identification / access
    access_token = fields.Char('Identification token', default=lambda self: str(uuid.uuid4()), readonly=True, required=True, copy=False)
    invite_token = fields.Char('Invite token', readonly=True, copy=False)  # no unique constraint, as it identifies a pool of attempts
    partner_id = fields.Many2one('res.partner', string='Contact', readonly=True)
    email = fields.Char('Email', readonly=True)
    nickname = fields.Char('Nickname', help="Attendee nickname, mainly used to identify them in the survey session leaderboard.")
    # questions / answers
    user_input_line_ids = fields.One2many('survey.user_input.line', 'user_input_id', string='Answers', copy=True)
    predefined_question_ids = fields.Many2many('survey.question', string='Predefined Questions', readonly=True)
    scoring_percentage = fields.Float("Score (%)", compute="_compute_scoring_values", store=True, compute_sudo=True)  # stored for perf reasons
    scoring_total = fields.Float("Total Score", compute="_compute_scoring_values", store=True, compute_sudo=True)  # stored for perf reasons
    scoring_success = fields.Boolean('Quizz Passed', compute='_compute_scoring_success', store=True, compute_sudo=True)  # stored for perf reasons
    # live sessions
    is_session_answer = fields.Boolean('Is in a Session', help="Is that user input part of a survey session or not.")
    question_time_limit_reached = fields.Boolean("Question Time Limit Reached", compute='_compute_question_time_limit_reached')

    _sql_constraints = [
        ('unique_token', 'UNIQUE (access_token)', 'An access token must be unique!'),
    ]
  54. @api.depends('user_input_line_ids.answer_score', 'user_input_line_ids.question_id', 'predefined_question_ids.answer_score')
  55. def _compute_scoring_values(self):
  56. for user_input in self:
  57. # sum(multi-choice question scores) + sum(simple answer_type scores)
  58. total_possible_score = 0
  59. for question in user_input.predefined_question_ids:
  60. if question.question_type == 'simple_choice':
  61. total_possible_score += max([score for score in question.mapped('suggested_answer_ids.answer_score') if score > 0], default=0)
  62. elif question.question_type == 'multiple_choice':
  63. total_possible_score += sum(score for score in question.mapped('suggested_answer_ids.answer_score') if score > 0)
  64. elif question.is_scored_question:
  65. total_possible_score += question.answer_score
  66. if total_possible_score == 0:
  67. user_input.scoring_percentage = 0
  68. user_input.scoring_total = 0
  69. else:
  70. score_total = sum(user_input.user_input_line_ids.mapped('answer_score'))
  71. user_input.scoring_total = score_total
  72. score_percentage = (score_total / total_possible_score) * 100
  73. user_input.scoring_percentage = round(score_percentage, 2) if score_percentage > 0 else 0
  74. @api.depends('scoring_percentage', 'survey_id')
  75. def _compute_scoring_success(self):
  76. for user_input in self:
  77. user_input.scoring_success = user_input.scoring_percentage >= user_input.survey_id.scoring_success_min
  78. @api.depends(
  79. 'start_datetime',
  80. 'survey_id.is_time_limited',
  81. 'survey_id.time_limit')
  82. def _compute_survey_time_limit_reached(self):
  83. """ Checks that the user_input is not exceeding the survey's time limit. """
  84. for user_input in self:
  85. if not user_input.is_session_answer and user_input.start_datetime:
  86. start_time = user_input.start_datetime
  87. time_limit = user_input.survey_id.time_limit
  88. user_input.survey_time_limit_reached = user_input.survey_id.is_time_limited and \
  89. fields.Datetime.now() >= start_time + relativedelta(minutes=time_limit)
  90. else:
  91. user_input.survey_time_limit_reached = False
  92. @api.depends(
  93. 'survey_id.session_question_id.time_limit',
  94. 'survey_id.session_question_id.is_time_limited',
  95. 'survey_id.session_question_start_time')
  96. def _compute_question_time_limit_reached(self):
  97. """ Checks that the user_input is not exceeding the question's time limit.
  98. Only used in the context of survey sessions. """
  99. for user_input in self:
  100. if user_input.is_session_answer and user_input.survey_id.session_question_start_time:
  101. start_time = user_input.survey_id.session_question_start_time
  102. time_limit = user_input.survey_id.session_question_id.time_limit
  103. user_input.question_time_limit_reached = user_input.survey_id.session_question_id.is_time_limited and \
  104. fields.Datetime.now() >= start_time + relativedelta(seconds=time_limit)
  105. else:
  106. user_input.question_time_limit_reached = False
    @api.depends('state', 'test_entry', 'survey_id.is_attempts_limited', 'partner_id', 'email', 'invite_token')
    def _compute_attempts_info(self):
        """ Compute how many attempts the same participant made on the same survey
        (attempts_count) and the 1-based rank of this attempt among them
        (attempts_number).

        Only meaningful for completed, non-test answers on surveys limiting the
        number of attempts; any other answer gets 1/1. Participants are matched
        by partner or email, optionally restricted to the same invite token. """
        attempts_to_compute = self.filtered(
            lambda user_input: user_input.state == 'done' and not user_input.test_entry and user_input.survey_id.is_attempts_limited
        )

        # trivial case: a single attempt, ranked first
        for user_input in (self - attempts_to_compute):
            user_input.attempts_count = 1
            user_input.attempts_number = 1

        if attempts_to_compute:
            # push pending ORM values to the database before querying it directly
            self.flush_model(['email', 'invite_token', 'partner_id', 'state', 'survey_id', 'test_entry'])
            # self-join: for each answer, count all sibling attempts and how many
            # have a lower id (i.e. were created earlier) to derive the rank
            self.env.cr.execute("""
                SELECT user_input.id,
                       COUNT(all_attempts_user_input.id) AS attempts_count,
                       COUNT(CASE WHEN all_attempts_user_input.id < user_input.id THEN all_attempts_user_input.id END) + 1 AS attempts_number
                FROM survey_user_input user_input
                LEFT OUTER JOIN survey_user_input all_attempts_user_input
                    ON user_input.survey_id = all_attempts_user_input.survey_id
                    AND all_attempts_user_input.state = 'done'
                    AND all_attempts_user_input.test_entry IS NOT TRUE
                    AND (user_input.invite_token IS NULL OR user_input.invite_token = all_attempts_user_input.invite_token)
                    AND (user_input.partner_id = all_attempts_user_input.partner_id OR user_input.email = all_attempts_user_input.email)
                WHERE user_input.id IN %s
                GROUP BY user_input.id;
            """, (tuple(attempts_to_compute.ids),))

            attempts_number_results = self.env.cr.dictfetchall()
            # reshape rows into {user_input_id: {'attempts_number': n, 'attempts_count': c}}
            attempts_number_results = {
                attempts_number_result['id']: {
                    'attempts_number': attempts_number_result['attempts_number'],
                    'attempts_count': attempts_number_result['attempts_count'],
                }
                for attempts_number_result in attempts_number_results
            }

            for user_input in attempts_to_compute:
                attempts_number_result = attempts_number_results.get(user_input.id, {})
                # default to 1/1 if the row is unexpectedly missing
                user_input.attempts_number = attempts_number_result.get('attempts_number', 1)
                user_input.attempts_count = attempts_number_result.get('attempts_count', 1)
  143. @api.model_create_multi
  144. def create(self, vals_list):
  145. for vals in vals_list:
  146. if 'predefined_question_ids' not in vals:
  147. suvey_id = vals.get('survey_id', self.env.context.get('default_survey_id'))
  148. survey = self.env['survey.survey'].browse(suvey_id)
  149. vals['predefined_question_ids'] = [(6, 0, survey._prepare_user_input_predefined_questions().ids)]
  150. return super(SurveyUserInput, self).create(vals_list)
  151. # ------------------------------------------------------------
  152. # ACTIONS / BUSINESS
  153. # ------------------------------------------------------------
  154. def action_resend(self):
  155. partners = self.env['res.partner']
  156. emails = []
  157. for user_answer in self:
  158. if user_answer.partner_id:
  159. partners |= user_answer.partner_id
  160. elif user_answer.email:
  161. emails.append(user_answer.email)
  162. return self.survey_id.with_context(
  163. default_existing_mode='resend',
  164. default_partner_ids=partners.ids,
  165. default_emails=','.join(emails)
  166. ).action_send_survey()
  167. def action_print_answers(self):
  168. """ Open the website page with the survey form """
  169. self.ensure_one()
  170. return {
  171. 'type': 'ir.actions.act_url',
  172. 'name': "View Answers",
  173. 'target': 'self',
  174. 'url': '/survey/print/%s?answer_token=%s' % (self.survey_id.access_token, self.access_token)
  175. }
  176. def action_redirect_to_attempts(self):
  177. self.ensure_one()
  178. action = self.env['ir.actions.act_window']._for_xml_id('survey.action_survey_user_input')
  179. context = dict(self.env.context or {})
  180. context['create'] = False
  181. context['search_default_survey_id'] = self.survey_id.id
  182. context['search_default_group_by_survey'] = False
  183. if self.partner_id:
  184. context['search_default_partner_id'] = self.partner_id.id
  185. elif self.email:
  186. context['search_default_email'] = self.email
  187. action['context'] = context
  188. return action
  189. @api.model
  190. def _generate_invite_token(self):
  191. return str(uuid.uuid4())
  192. def _mark_in_progress(self):
  193. """ marks the state as 'in_progress' and updates the start_datetime accordingly. """
  194. self.write({
  195. 'start_datetime': fields.Datetime.now(),
  196. 'state': 'in_progress'
  197. })
    def _mark_done(self):
        """ This method will:
        1. mark the state as 'done'
        2. send the certification email with attached document if
            - The survey is a certification
            - It has a certification_mail_template_id set
            - The user succeeded the test
        Will also run challenge Cron to give the certification badge if any."""
        # close the answers first so the scoring fields are final
        self.write({
            'end_datetime': fields.Datetime.now(),
            'state': 'done',
        })
        Challenge = self.env['gamification.challenge'].sudo()
        badge_ids = []
        for user_input in self:
            if user_input.survey_id.certification and user_input.scoring_success:
                # test entries never trigger the certification email
                if user_input.survey_id.certification_mail_template_id and not user_input.test_entry:
                    user_input.survey_id.certification_mail_template_id.send_mail(user_input.id, email_layout_xmlid="mail.mail_notification_light")
                if user_input.survey_id.certification_give_badge:
                    badge_ids.append(user_input.survey_id.certification_badge_id.id)

            # Update predefined_question_id to remove inactive questions
            user_input.predefined_question_ids -= user_input._get_inactive_conditional_questions()

        if badge_ids:
            # run the gamification cron on the matching challenges so the badge
            # is granted immediately rather than at the next scheduled run
            challenges = Challenge.search([('reward_id', 'in', badge_ids)])
            if challenges:
                Challenge._cron_update(ids=challenges.ids, commit=False)
  224. def get_start_url(self):
  225. self.ensure_one()
  226. return '%s?answer_token=%s' % (self.survey_id.get_start_url(), self.access_token)
  227. def get_print_url(self):
  228. self.ensure_one()
  229. return '%s?answer_token=%s' % (self.survey_id.get_print_url(), self.access_token)
  230. # ------------------------------------------------------------
  231. # CREATE / UPDATE LINES FROM SURVEY FRONTEND INPUT
  232. # ------------------------------------------------------------
  233. def save_lines(self, question, answer, comment=None):
  234. """ Save answers to questions, depending on question type
  235. If an answer already exists for question and user_input_id, it will be
  236. overwritten (or deleted for 'choice' questions) (in order to maintain data consistency).
  237. """
  238. old_answers = self.env['survey.user_input.line'].search([
  239. ('user_input_id', '=', self.id),
  240. ('question_id', '=', question.id)
  241. ])
  242. if question.question_type in ['char_box', 'text_box', 'numerical_box', 'date', 'datetime']:
  243. self._save_line_simple_answer(question, old_answers, answer)
  244. if question.save_as_email and answer:
  245. self.write({'email': answer})
  246. if question.save_as_nickname and answer:
  247. self.write({'nickname': answer})
  248. elif question.question_type in ['simple_choice', 'multiple_choice']:
  249. self._save_line_choice(question, old_answers, answer, comment)
  250. elif question.question_type == 'matrix':
  251. self._save_line_matrix(question, old_answers, answer, comment)
  252. else:
  253. raise AttributeError(question.question_type + ": This type of question has no saving function")
  254. def _save_line_simple_answer(self, question, old_answers, answer):
  255. vals = self._get_line_answer_values(question, answer, question.question_type)
  256. if old_answers:
  257. old_answers.write(vals)
  258. return old_answers
  259. else:
  260. return self.env['survey.user_input.line'].create(vals)
  261. def _save_line_choice(self, question, old_answers, answers, comment):
  262. if not (isinstance(answers, list)):
  263. answers = [answers]
  264. if not answers:
  265. # add a False answer to force saving a skipped line
  266. # this will make this question correctly considered as skipped in statistics
  267. answers = [False]
  268. vals_list = []
  269. if question.question_type == 'simple_choice':
  270. if not question.comment_count_as_answer or not question.comments_allowed or not comment:
  271. vals_list = [self._get_line_answer_values(question, answer, 'suggestion') for answer in answers]
  272. elif question.question_type == 'multiple_choice':
  273. vals_list = [self._get_line_answer_values(question, answer, 'suggestion') for answer in answers]
  274. if comment:
  275. vals_list.append(self._get_line_comment_values(question, comment))
  276. old_answers.sudo().unlink()
  277. return self.env['survey.user_input.line'].create(vals_list)
  278. def _save_line_matrix(self, question, old_answers, answers, comment):
  279. vals_list = []
  280. if not answers and question.matrix_row_ids:
  281. # add a False answer to force saving a skipped line
  282. # this will make this question correctly considered as skipped in statistics
  283. answers = {question.matrix_row_ids[0].id: [False]}
  284. if answers:
  285. for row_key, row_answer in answers.items():
  286. for answer in row_answer:
  287. vals = self._get_line_answer_values(question, answer, 'suggestion')
  288. vals['matrix_row_id'] = int(row_key)
  289. vals_list.append(vals.copy())
  290. if comment:
  291. vals_list.append(self._get_line_comment_values(question, comment))
  292. old_answers.sudo().unlink()
  293. return self.env['survey.user_input.line'].create(vals_list)
  294. def _get_line_answer_values(self, question, answer, answer_type):
  295. vals = {
  296. 'user_input_id': self.id,
  297. 'question_id': question.id,
  298. 'skipped': False,
  299. 'answer_type': answer_type,
  300. }
  301. if not answer or (isinstance(answer, str) and not answer.strip()):
  302. vals.update(answer_type=None, skipped=True)
  303. return vals
  304. if answer_type == 'suggestion':
  305. vals['suggested_answer_id'] = int(answer)
  306. elif answer_type == 'numerical_box':
  307. vals['value_numerical_box'] = float(answer)
  308. else:
  309. vals['value_%s' % answer_type] = answer
  310. return vals
  311. def _get_line_comment_values(self, question, comment):
  312. return {
  313. 'user_input_id': self.id,
  314. 'question_id': question.id,
  315. 'skipped': False,
  316. 'answer_type': 'char_box',
  317. 'value_char_box': comment,
  318. }
  319. # ------------------------------------------------------------
  320. # STATISTICS / RESULTS
  321. # ------------------------------------------------------------
    def _prepare_statistics(self):
        """ Prepares survey.user_input's statistics to display various charts on the frontend.
        Returns a structure containing answers statistics "by section" and "totals" for every input in self.
        e.g returned structure:
        {
            survey.user_input(1,): {
                'by_section': {
                    'Uncategorized': {
                        'question_count': 2,
                        'correct': 2,
                        'partial': 0,
                        'incorrect': 0,
                        'skipped': 0,
                    },
                    'Mathematics': {
                        'question_count': 3,
                        'correct': 1,
                        'partial': 1,
                        'incorrect': 0,
                        'skipped': 1,
                    },
                    'Geography': {
                        'question_count': 4,
                        'correct': 2,
                        'partial': 0,
                        'incorrect': 2,
                        'skipped': 0,
                    }
                },
                'totals': [{
                    'text': 'Correct',
                    'count': 5,
                }, {
                    'text': 'Partially',
                    'count': 1,
                }, {
                    'text': 'Incorrect',
                    'count': 2,
                }, {
                    'text': 'Unanswered',
                    'count': 1,
                }]
            }
        }"""
        res = dict((user_input, {
            'by_section': {}
        }) for user_input in self)

        # only scored questions contribute to the statistics
        scored_questions = self.mapped('predefined_question_ids').filtered(lambda question: question.is_scored_question)

        for question in scored_questions:
            # NOTE: question_correct_suggested_answers is only (re)assigned for
            # choice questions and only read in the choice branch below
            if question.question_type in ['simple_choice', 'multiple_choice']:
                question_correct_suggested_answers = question.suggested_answer_ids.filtered(lambda answer: answer.is_correct)
            question_section = question.page_id.title or _('Uncategorized')
            for user_input in self:
                user_input_lines = user_input.user_input_line_ids.filtered(lambda line: line.question_id == question)
                if question.question_type in ['simple_choice', 'multiple_choice']:
                    answer_result_key = self._choice_question_answer_result(user_input_lines, question_correct_suggested_answers)
                else:
                    answer_result_key = self._simple_question_answer_result(user_input_lines)
                # lazily create the per-section counters
                if question_section not in res[user_input]['by_section']:
                    res[user_input]['by_section'][question_section] = {
                        'question_count': 0,
                        'correct': 0,
                        'partial': 0,
                        'incorrect': 0,
                        'skipped': 0,
                    }
                res[user_input]['by_section'][question_section]['question_count'] += 1
                res[user_input]['by_section'][question_section][answer_result_key] += 1

        # aggregate the per-section counters into overall totals
        for user_input in self:
            correct_count = 0
            partial_count = 0
            incorrect_count = 0
            skipped_count = 0
            for section_counts in res[user_input]['by_section'].values():
                correct_count += section_counts.get('correct', 0)
                partial_count += section_counts.get('partial', 0)
                incorrect_count += section_counts.get('incorrect', 0)
                skipped_count += section_counts.get('skipped', 0)
            res[user_input]['totals'] = [
                {'text': _("Correct"), 'count': correct_count},
                {'text': _("Partially"), 'count': partial_count},
                {'text': _("Incorrect"), 'count': incorrect_count},
                {'text': _("Unanswered"), 'count': skipped_count}
            ]
        return res
  407. def _choice_question_answer_result(self, user_input_lines, question_correct_suggested_answers):
  408. correct_user_input_lines = user_input_lines.filtered(lambda line: line.answer_is_correct and not line.skipped).mapped('suggested_answer_id')
  409. incorrect_user_input_lines = user_input_lines.filtered(lambda line: not line.answer_is_correct and not line.skipped)
  410. if question_correct_suggested_answers and correct_user_input_lines == question_correct_suggested_answers:
  411. return 'correct'
  412. elif correct_user_input_lines and correct_user_input_lines < question_correct_suggested_answers:
  413. return 'partial'
  414. elif not correct_user_input_lines and incorrect_user_input_lines:
  415. return 'incorrect'
  416. else:
  417. return 'skipped'
  418. def _simple_question_answer_result(self, user_input_line):
  419. if user_input_line.skipped:
  420. return 'skipped'
  421. elif user_input_line.answer_is_correct:
  422. return 'correct'
  423. else:
  424. return 'incorrect'
  425. # ------------------------------------------------------------
  426. # Conditional Questions Management
  427. # ------------------------------------------------------------
  428. def _get_conditional_values(self):
  429. """ For survey containing conditional questions, we need a triggered_questions_by_answer map that contains
  430. {key: answer, value: the question that the answer triggers, if selected},
  431. The idea is to be able to verify, on every answer check, if this answer is triggering the display
  432. of another question.
  433. If answer is not in the conditional map:
  434. - nothing happens.
  435. If the answer is in the conditional map:
  436. - If we are in ONE PAGE survey : (handled at CLIENT side)
  437. -> display immediately the depending question
  438. - If we are in PAGE PER SECTION : (handled at CLIENT side)
  439. - If related question is on the same page :
  440. -> display immediately the depending question
  441. - If the related question is not on the same page :
  442. -> keep the answers in memory and check at next page load if the depending question is in there and
  443. display it, if so.
  444. - If we are in PAGE PER QUESTION : (handled at SERVER side)
  445. -> During submit, determine which is the next question to display getting the next question
  446. that is the next in sequence and that is either not triggered by another question's answer, or that
  447. is triggered by an already selected answer.
  448. To do all this, we need to return:
  449. - list of all selected answers: [answer_id1, answer_id2, ...] (for survey reloading, otherwise, this list is
  450. updated at client side)
  451. - triggered_questions_by_answer: dict -> for a given answer, list of questions triggered by this answer;
  452. Used mainly for dynamic show/hide behaviour at client side
  453. - triggering_answer_by_question: dict -> for a given question, the answer that triggers it
  454. Used mainly to ease template rendering
  455. """
  456. triggering_answer_by_question, triggered_questions_by_answer = {}, {}
  457. # Ignore conditional configuration if randomised questions selection
  458. if self.survey_id.questions_selection != 'random':
  459. triggering_answer_by_question, triggered_questions_by_answer = self.survey_id._get_conditional_maps()
  460. selected_answers = self._get_selected_suggested_answers()
  461. return triggering_answer_by_question, triggered_questions_by_answer, selected_answers
  462. def _get_selected_suggested_answers(self):
  463. """
  464. For now, only simple and multiple choices question type are handled by the conditional questions feature.
  465. Mapping all the suggested answers selected by the user will also include answers from matrix question type,
  466. Those ones won't be used.
  467. Maybe someday, conditional questions feature will be extended to work with matrix question.
  468. :return: all the suggested answer selected by the user.
  469. """
  470. return self.mapped('user_input_line_ids.suggested_answer_id')
  471. def _clear_inactive_conditional_answers(self):
  472. """
  473. Clean eventual answers on conditional questions that should not have been displayed to user.
  474. This method is used mainly for page per question survey, a similar method does the same treatment
  475. at client side for the other survey layouts.
  476. E.g.: if depending answer was uncheck after answering conditional question, we need to clear answers
  477. of that conditional question, for two reasons:
  478. - ensure correct scoring
  479. - if the selected answer triggers another question later in the survey, if the answer is not cleared,
  480. a question that should not be displayed to the user will be.
  481. TODO DBE: Maybe this can be the only cleaning method, even for section_per_page or one_page where
  482. conditional questions are, for now, cleared in JS directly. But this can be annoying if user typed a long
  483. answer, changed their mind unchecking depending answer and changed again their mind by rechecking the depending
  484. answer -> For now, the long answer will be lost. If we use this as the master cleaning method,
  485. long answer will be cleared only during submit.
  486. """
  487. inactive_questions = self._get_inactive_conditional_questions()
  488. # delete user.input.line on question that should not be answered.
  489. answers_to_delete = self.user_input_line_ids.filtered(lambda answer: answer.question_id in inactive_questions)
  490. answers_to_delete.unlink()
  491. def _get_inactive_conditional_questions(self):
  492. triggering_answer_by_question, triggered_questions_by_answer, selected_answers = self._get_conditional_values()
  493. # get questions that should not be answered
  494. inactive_questions = self.env['survey.question']
  495. for answer in triggered_questions_by_answer.keys():
  496. if answer not in selected_answers:
  497. for question in triggered_questions_by_answer[answer]:
  498. inactive_questions |= question
  499. return inactive_questions
  500. def _get_print_questions(self):
  501. """ Get the questions to display : the ones that should have been answered = active questions
  502. In case of session, active questions are based on most voted answers
  503. :return: active survey.question browse records
  504. """
  505. survey = self.survey_id
  506. if self.is_session_answer:
  507. most_voted_answers = survey._get_session_most_voted_answers()
  508. inactive_questions = most_voted_answers._get_inactive_conditional_questions()
  509. else:
  510. inactive_questions = self._get_inactive_conditional_questions()
  511. return survey.question_ids - inactive_questions
  512. # ------------------------------------------------------------
  513. # MESSAGING
  514. # ------------------------------------------------------------
  515. def _message_get_suggested_recipients(self):
  516. recipients = super()._message_get_suggested_recipients()
  517. for user_input in self:
  518. if user_input.partner_id:
  519. user_input._message_add_suggested_recipient(
  520. recipients,
  521. partner=user_input.partner_id,
  522. reason=_('Survey Participant')
  523. )
  524. return recipients
class SurveyUserInputLine(models.Model):
    """ One answer (or skip) given by a participant to a single question. """
    _name = 'survey.user_input.line'
    _description = 'Survey User Input Line'
    _rec_name = 'user_input_id'
    _order = 'question_sequence, id'

    # survey data
    user_input_id = fields.Many2one('survey.user_input', string='User Input', ondelete='cascade', required=True, index=True)
    survey_id = fields.Many2one(related='user_input_id.survey_id', string='Survey', store=True, readonly=False)
    question_id = fields.Many2one('survey.question', string='Question', ondelete='cascade', required=True)
    page_id = fields.Many2one(related='question_id.page_id', string="Section", readonly=False)
    question_sequence = fields.Integer('Sequence', related='question_id.sequence', store=True)
    # answer
    skipped = fields.Boolean('Skipped')
    # answer_type selects which value_* field (or suggested_answer_id) holds the answer
    answer_type = fields.Selection([
        ('text_box', 'Free Text'),
        ('char_box', 'Text'),
        ('numerical_box', 'Number'),
        ('date', 'Date'),
        ('datetime', 'Datetime'),
        ('suggestion', 'Suggestion')], string='Answer Type')
    value_char_box = fields.Char('Text answer')
    value_numerical_box = fields.Float('Numerical answer')
    value_date = fields.Date('Date answer')
    value_datetime = fields.Datetime('Datetime answer')
    value_text_box = fields.Text('Free Text answer')
    suggested_answer_id = fields.Many2one('survey.question.answer', string="Suggested answer")
    # for matrix questions: the row this suggestion applies to
    matrix_row_id = fields.Many2one('survey.question.answer', string="Row answer")
    # scoring
    answer_score = fields.Float('Score')
    answer_is_correct = fields.Boolean('Correct')
  555. @api.depends('answer_type')
  556. def _compute_display_name(self):
  557. for line in self:
  558. if line.answer_type == 'char_box':
  559. line.display_name = line.value_char_box
  560. elif line.answer_type == 'text_box' and line.value_text_box:
  561. line.display_name = textwrap.shorten(line.value_text_box, width=50, placeholder=" [...]")
  562. elif line.answer_type == 'numerical_box':
  563. line.display_name = line.value_numerical_box
  564. elif line.answer_type == 'date':
  565. line.display_name = fields.Date.to_string(line.value_date)
  566. elif line.answer_type == 'datetime':
  567. line.display_name = fields.Datetime.to_string(line.value_datetime)
  568. elif line.answer_type == 'suggestion':
  569. if line.matrix_row_id:
  570. line.display_name = '%s: %s' % (
  571. line.suggested_answer_id.value,
  572. line.matrix_row_id.value)
  573. else:
  574. line.display_name = line.suggested_answer_id.value
  575. if not line.display_name:
  576. line.display_name = _('Skipped')
  577. @api.constrains('skipped', 'answer_type')
  578. def _check_answer_type_skipped(self):
  579. for line in self:
  580. if (line.skipped == bool(line.answer_type)):
  581. raise ValidationError(_('A question can either be skipped or answered, not both.'))
  582. # allow 0 for numerical box
  583. if line.answer_type == 'numerical_box' and float_is_zero(line['value_numerical_box'], precision_digits=6):
  584. continue
  585. if line.answer_type == 'suggestion':
  586. field_name = 'suggested_answer_id'
  587. elif line.answer_type:
  588. field_name = 'value_%s' % line.answer_type
  589. else: # skipped
  590. field_name = False
  591. if field_name and not line[field_name]:
  592. raise ValidationError(_('The answer must be in the right type'))
  593. @api.model_create_multi
  594. def create(self, vals_list):
  595. for vals in vals_list:
  596. if not vals.get('answer_score'):
  597. score_vals = self._get_answer_score_values(vals)
  598. vals.update(score_vals)
  599. return super(SurveyUserInputLine, self).create(vals_list)
  600. def write(self, vals):
  601. res = True
  602. for line in self:
  603. vals_copy = {**vals}
  604. getter_params = {
  605. 'user_input_id': line.user_input_id.id,
  606. 'answer_type': line.answer_type,
  607. 'question_id': line.question_id.id,
  608. **vals_copy
  609. }
  610. if not vals_copy.get('answer_score'):
  611. score_vals = self._get_answer_score_values(getter_params, compute_speed_score=False)
  612. vals_copy.update(score_vals)
  613. res = super(SurveyUserInputLine, line).write(vals_copy) and res
  614. return res
    @api.model
    def _get_answer_score_values(self, vals, compute_speed_score=True):
        """ Get values for: answer_is_correct and associated answer_score.

        Requires vals to contain 'answer_type', 'question_id', and 'user_input_id'.
        Depending on 'answer_type' additional value of 'suggested_answer_id' may also be
        required.

        Calculates whether an answer_is_correct and its score based on 'answer_type' and
        corresponding question. Handles choice (answer_type == 'suggestion') questions
        separately from other question types. Each selected choice answer is handled as an
        individual answer.

        If score depends on the speed of the answer, it is adjusted as follows:
            - If the user answers in less than 2 seconds, they receive 100% of the possible points.
            - If user answers after that, they receive 50% of the possible points + the remaining
              50% scaled by the time limit and time taken to answer [i.e. a minimum of 50% of the
              possible points is given to all correct answers]

        Example of returned values:
            * {'answer_is_correct': False, 'answer_score': 0} (default)
            * {'answer_is_correct': True, 'answer_score': 2.0}

        :param dict vals: raw create/write values for a survey.user_input.line
        :param bool compute_speed_score: apply the session speed-rating adjustment
        :return dict: {'answer_is_correct': bool, 'answer_score': float}
        :raise ValueError: if vals carries no 'question_id'
        """
        user_input_id = vals.get('user_input_id')
        answer_type = vals.get('answer_type')

        question_id = vals.get('question_id')
        if not question_id:
            raise ValueError(_('Computing score requires a question in arguments.'))
        question = self.env['survey.question'].browse(int(question_id))

        # default and non-scored questions
        answer_is_correct = False
        answer_score = 0

        # record selected suggested choice answer_score (can be: pos, neg, or 0)
        if question.question_type in ['simple_choice', 'multiple_choice']:
            if answer_type == 'suggestion':
                suggested_answer_id = vals.get('suggested_answer_id')
                if suggested_answer_id:
                    question_answer = self.env['survey.question.answer'].browse(int(suggested_answer_id))
                    answer_score = question_answer.answer_score
                    answer_is_correct = question_answer.is_correct
        # for all other scored question cases, record question answer_score (can be: pos or 0)
        elif question.question_type in ['date', 'datetime', 'numerical_box']:
            # values may arrive as strings (from create/write vals): normalize before comparing
            answer = vals.get('value_%s' % answer_type)
            if answer_type == 'numerical_box':
                answer = float(answer)
            elif answer_type == 'date':
                answer = fields.Date.from_string(answer)
            elif answer_type == 'datetime':
                answer = fields.Datetime.from_string(answer)
            # compare against the question's expected answer_date/answer_datetime/answer_numerical_box
            if answer and answer == question['answer_%s' % answer_type]:
                answer_is_correct = True
                answer_score = question.answer_score

        if compute_speed_score and answer_score > 0:
            user_input = self.env['survey.user_input'].browse(user_input_id)
            # speed rating only applies to live-session answers on surveys that enable it
            session_speed_rating = user_input.exists() and user_input.is_session_answer and user_input.survey_id.session_speed_rating
            if session_speed_rating:
                max_score_delay = 2
                time_limit = question.time_limit
                now = fields.Datetime.now()
                seconds_to_answer = (now - user_input.survey_id.session_question_start_time).total_seconds()
                question_remaining_time = time_limit - seconds_to_answer
                # if answered within the max_score_delay => leave score as is
                if question_remaining_time < 0:  # if no time left
                    answer_score /= 2
                elif seconds_to_answer > max_score_delay:
                    # NOTE(review): time_limit > max_score_delay here, since
                    # time_limit <= max_score_delay with seconds_to_answer > max_score_delay
                    # would already have hit the question_remaining_time < 0 branch.
                    time_limit -= max_score_delay  # we remove the max_score_delay to have all possible values
                    score_proportion = (time_limit - seconds_to_answer) / time_limit
                    # linear interpolation between 50% and 100% of the base score
                    answer_score = (answer_score / 2) * (1 + score_proportion)

        return {
            'answer_is_correct': answer_is_correct,
            'answer_score': answer_score
        }