Skip to content
This repository has been archived by the owner on Jun 4, 2021. It is now read-only.

Commit

Permalink
Last fixes to ensure the code runs on the new system
Browse files Browse the repository at this point in the history
  • Loading branch information
selBaez committed May 31, 2020
1 parent 305762c commit e40435d
Show file tree
Hide file tree
Showing 12 changed files with 69 additions and 23 deletions.
29 changes: 29 additions & 0 deletions mac_requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
numpy==1.16.2
scipy==1.2.3
sklearn

nltk==3.4
fuzzywuzzy
python-Levenshtein

rdflib
iribaker
SPARQLWrapper

PyObjC
playsound
webrtcvad

google-cloud-speech
google-cloud-TextToSpeech
google-cloud-translate

Pillow==5.4.1
opencv-python

tornado==5.1.1
typing
pyaudio

pycountry
reverse-geocoder
2 changes: 1 addition & 1 deletion pepper/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@


# Global Logging Setup
LOGGING_LEVEL = logging.DEBUG
LOGGING_LEVEL = logging.INFO # INFO for cleaner experiments
LOGGING_FILE = 'log.txt'
LOGGING_FORMAT = '%(asctime)s %(levelname)-8s %(name)-35s %(message)s'
LOGGING_DATE_FORMAT = '%x %X'
Expand Down
9 changes: 8 additions & 1 deletion pepper/brain/LTM_statement_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,14 @@ def _link_entity(self, entity, graph, namespace_mapping=None):


def _create_detections(self, cntxt, context):
"""
Parameters
----------
self : brain
cntxt: Context
context: Entity
"""
# Get ids of existing objects in this location
memory = self.location_reasoner.get_location_memory(cntxt)

Expand Down Expand Up @@ -63,7 +71,6 @@ def _create_detections(self, cntxt, context):
# Create detection
objct_detection = create_claim_graph(self, self.myself, prdt, objct, UtteranceType.EXPERIENCE)
self.claim_graph.add((objct_detection.id, self.namespaces['EPS']['hasContext'], context.id))

observations.append(objct_detection)

# Open ended learning
Expand Down
5 changes: 4 additions & 1 deletion pepper/brain/infrastructure/rdf_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,10 @@ def _fix_nlp_types(self, types):
# this was just a char
fixed_types.append(types.split('.')[-1])
break
elif "article" in el or "prep" in el or "adj" in el:
elif "article" in el or "prep" in el or "adj" in el or "verb" in el:
pass
elif "deictic" in el or "article:definite" in el:
# these need coreference resolution before typing
pass
elif '.' in el:
fixed_types.append(el.split('.')[-1])
Expand Down
3 changes: 2 additions & 1 deletion pepper/brain/queries/content exploration/count_statements.rq
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX grasp: <http://groundedannotationframework.org/grasp#>
PREFIX gaf: <http://groundedannotationframework.org/gaf#>

select (COUNT(?stat) AS ?count) where {
select (COUNT(distinct ?stat) AS ?count) where {
?stat rdf:type gaf:Assertion .
?stat gaf:denotedBy ?m .
}
2 changes: 1 addition & 1 deletion pepper/brain/queries/trust/count_statements_by.rq
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ PREFIX gaf: <http://groundedannotationframework.org/gaf#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX prov: <http://www.w3.org/ns/prov#>

select (COUNT(?stat) AS ?num_stat) where {
select (COUNT(distinct ?stat) AS ?num_stat) where {
?stat rdf:type gaf:Assertion .
?stat gaf:denotedBy ?m .
?m grasp:wasAttributedTo ?author .
Expand Down
4 changes: 2 additions & 2 deletions pepper/brain/reasoners/location_reasoner.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def get_location_memory(self, cntxt):
response = self._submit_query(query)

location_memory = {}
if response[0]['type']['value'] != '':
if response and response[0]['type']['value'] != '':
for elem in response:
categories, ids = self._fill_location_memory_(elem)
# assign multiple categories (eg selene is person and agent)
Expand All @@ -97,7 +97,7 @@ def get_location_memory(self, cntxt):
location_memory[casefold_text(category, format='triple')] = temp

# Local object memories
for item in cntxt.objects: # Error, this skips the first element?
for item in cntxt.objects: # Error, this skips the first element?
if item.name.lower() != 'person':
temp = location_memory.get(casefold_text(item.name, format='triple'),
{'brain_ids': [], 'local_ids': []})
Expand Down
18 changes: 9 additions & 9 deletions pepper/brain/reasoners/thought_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def get_statement_novelty(self, statement_uri):
query = read_query('thoughts/statement_novelty') % statement_uri
response = self._submit_query(query)

if response[0] != {}:
if response and response[0] != {}:
response = [self._fill_statement_novelty_(elem) for elem in response]
else:
response = []
Expand Down Expand Up @@ -193,11 +193,11 @@ def get_overlaps(self, utterance):
"""
# Role as subject
query = read_query('thoughts/object_overlap') % (
utterance.triple.predicate_name, utterance.triple.complement_name,
utterance.triple.subject_name)
utterance.triple.predicate_name, utterance.triple.complement_name,
utterance.triple.subject_name)
response = self._submit_query(query)

if response[0]['types']['value'] != '':
if response and response[0]['types']['value'] != '':
complement_overlap = [self._fill_overlap_(elem) for elem in response]
else:
complement_overlap = []
Expand All @@ -208,7 +208,7 @@ def get_overlaps(self, utterance):
utterance.triple.complement_name)
response = self._submit_query(query)

if response[0]['types']['value'] != '':
if response and response[0]['types']['value'] != '':
subject_overlap = [self._fill_overlap_(elem) for elem in response]
else:
subject_overlap = []
Expand Down Expand Up @@ -303,7 +303,7 @@ def get_complement_cardinality_conflicts(self, utterance):
utterance.triple.complement_name)

response = self._submit_query(query)
if response[0] != {}:
if response and response[0] != {}:
conflicts = [self._fill_cardinality_conflict_(elem) for elem in response]
else:
conflicts = []
Expand All @@ -327,7 +327,7 @@ def get_negation_conflicts(self, utterance):
utterance.triple.complement_name)

response = self._submit_query(query)
if response[0] != {}:
if response and response[0] != {}:
conflicts = [self._fill_negation_conflict_(elem) for elem in response]
else:
conflicts = []
Expand All @@ -350,7 +350,7 @@ def get_trust(self, speaker):
# chat based feature
num_chats = float(self.count_chat_with(speaker))
friends = self.get_best_friends()
best_friend_chats = float(friends[0][1])
best_friend_chats = float(friends[0][1]) if friends else num_chats
chat_feature = num_chats / best_friend_chats

# new content feature
Expand All @@ -361,7 +361,7 @@ def get_trust(self, speaker):
# conflicts feature
num_conflicts = float(len(self.get_conflicts_by(speaker)))
all_conflicts = float(len(self.get_conflicts()))
conflicts_feature = (num_conflicts / all_conflicts) - 1
conflicts_feature = -(num_conflicts / all_conflicts) - 1 if all_conflicts != 0 else 1

# Aggregate
# TODO scale
Expand Down
9 changes: 7 additions & 2 deletions pepper/framework/backend/system/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,16 @@ def _run(self):
t0 = time()

# Get frame from camera
status, image = self._camera.read()
# Sometimes the camera fails to capture the first image, so we retry up to three times to initialize the camera
status = False
image = None
for chance in range(3):
status, image = self._camera.read()
if status:
break

if status:
if self._running:

# Resize Image and Convert to RGB
image = cv2.resize(image, (self.width, self.height))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
Expand Down
1 change: 1 addition & 0 deletions pepper/framework/backend/system/text_to_speech.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ def _play_sound(self, mp3):
playsound(file_hash)
finally:
if os.path.exists(file_hash):
# TODO: Sometimes we need to keep all data from an experiment; in that case, comment out the line below and replace it with `pass`
os.remove(file_hash)


2 changes: 1 addition & 1 deletion pepper/language/language.py
Original file line number Diff line number Diff line change
Expand Up @@ -454,7 +454,7 @@ def _patch_names(self, hypotheses):
@staticmethod
def _get_closest_name(word, names=config.PEOPLE_FRIENDS_NAMES, max_name_distance=2):
# type: (str, List[str], int) -> str
if word[0].isupper():
if word[0].isupper() and names:
name, distance = sorted([(name, edit_distance(name, word)) for name in names], key=lambda key: key[1])[0]

if distance <= max_name_distance:
Expand Down
8 changes: 4 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
numpy
scipy
numpy==1.16.2
scipy==1.2.3
sklearn

nltk
nltk==3.4
fuzzywuzzy[speedup]

rdflib
Expand All @@ -22,6 +22,6 @@ reverse_geocoder
pillow
opencv-python

tornado
tornado==5.1.1
typing
pyaudio

0 comments on commit e40435d

Please sign in to comment.