fix device
Browse files
- app.py +4 -5
- app_test.py +4 -5
app.py
CHANGED
|
@@ -237,7 +237,6 @@ def buildmodel(**kwargs):
|
|
| 237 |
seed = kwargs.get('seed', None)
|
| 238 |
input_model = kwargs.get('input_model', None)
|
| 239 |
quantizer_version = kwargs.get('quantizer_version', 'v4')
|
| 240 |
-
device = "cuda"
|
| 241 |
|
| 242 |
set_seed(seed)
|
| 243 |
# old_tokenizer = AutoTokenizer.from_pretrained(input_model, trust_remote_code=True)
|
|
@@ -289,7 +288,7 @@ def buildmodel(**kwargs):
|
|
| 289 |
subfolder="checkpoint-26000", # 加载检查点目录
|
| 290 |
config=model_args,
|
| 291 |
# cache_dir="/openseg_blob/v-yanbin/GradioDemo/cache_dir",
|
| 292 |
-
)
|
| 293 |
# model = CrelloModel(config=model_args)
|
| 294 |
|
| 295 |
tokenizer.add_special_tokens({"mask_token": "<mask>"})
|
|
@@ -327,7 +326,7 @@ def construction_layout():
|
|
| 327 |
# print('after token embeddings to match the tokenizer', 129423)
|
| 328 |
|
| 329 |
print("before .to(device)")
|
| 330 |
-
model = model.to(device)
|
| 331 |
print("after .to(device)")
|
| 332 |
model = model.bfloat16()
|
| 333 |
model.eval()
|
|
@@ -339,8 +338,8 @@ def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_s
|
|
| 339 |
json_example = inputs
|
| 340 |
input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
|
| 341 |
inputs = tokenizer(
|
| 342 |
-
input_intension, return_tensors="pt"
|
| 343 |
-
).to(device)
|
| 344 |
|
| 345 |
stopping_criteria = StoppingCriteriaList()
|
| 346 |
stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
|
|
|
|
| 237 |
seed = kwargs.get('seed', None)
|
| 238 |
input_model = kwargs.get('input_model', None)
|
| 239 |
quantizer_version = kwargs.get('quantizer_version', 'v4')
|
|
|
|
| 240 |
|
| 241 |
set_seed(seed)
|
| 242 |
# old_tokenizer = AutoTokenizer.from_pretrained(input_model, trust_remote_code=True)
|
|
|
|
| 288 |
subfolder="checkpoint-26000", # 加载检查点目录
|
| 289 |
config=model_args,
|
| 290 |
# cache_dir="/openseg_blob/v-yanbin/GradioDemo/cache_dir",
|
| 291 |
+
).to("cuda")
|
| 292 |
# model = CrelloModel(config=model_args)
|
| 293 |
|
| 294 |
tokenizer.add_special_tokens({"mask_token": "<mask>"})
|
|
|
|
| 326 |
# print('after token embeddings to match the tokenizer', 129423)
|
| 327 |
|
| 328 |
print("before .to(device)")
|
| 329 |
+
model = model.to("cuda")
|
| 330 |
print("after .to(device)")
|
| 331 |
model = model.bfloat16()
|
| 332 |
model.eval()
|
|
|
|
| 338 |
json_example = inputs
|
| 339 |
input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
|
| 340 |
inputs = tokenizer(
|
| 341 |
+
input_intension, return_tensors="pt"
|
| 342 |
+
).to("cuda")
|
| 343 |
|
| 344 |
stopping_criteria = StoppingCriteriaList()
|
| 345 |
stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
|
app_test.py
CHANGED
|
@@ -237,7 +237,6 @@ def buildmodel(**kwargs):
|
|
| 237 |
seed = kwargs.get('seed', None)
|
| 238 |
input_model = kwargs.get('input_model', None)
|
| 239 |
quantizer_version = kwargs.get('quantizer_version', 'v4')
|
| 240 |
-
device = "cuda"
|
| 241 |
|
| 242 |
set_seed(seed)
|
| 243 |
# old_tokenizer = AutoTokenizer.from_pretrained(input_model, trust_remote_code=True)
|
|
@@ -289,7 +288,7 @@ def buildmodel(**kwargs):
|
|
| 289 |
subfolder="checkpoint-26000", # 加载检查点目录
|
| 290 |
config=model_args,
|
| 291 |
cache_dir="/openseg_blob/v-yanbin/GradioDemo/cache_dir",
|
| 292 |
-
)
|
| 293 |
# model = CrelloModel(config=model_args)
|
| 294 |
|
| 295 |
tokenizer.add_special_tokens({"mask_token": "<mask>"})
|
|
@@ -327,7 +326,7 @@ def construction_layout():
|
|
| 327 |
# print('after token embeddings to match the tokenizer', 129423)
|
| 328 |
|
| 329 |
print("before .to(device)")
|
| 330 |
-
model = model.to(device)
|
| 331 |
print("after .to(device)")
|
| 332 |
model = model.bfloat16()
|
| 333 |
model.eval()
|
|
@@ -339,8 +338,8 @@ def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_s
|
|
| 339 |
json_example = inputs
|
| 340 |
input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
|
| 341 |
inputs = tokenizer(
|
| 342 |
-
input_intension, return_tensors="pt"
|
| 343 |
-
).to(device)
|
| 344 |
|
| 345 |
stopping_criteria = StoppingCriteriaList()
|
| 346 |
stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
|
|
|
|
| 237 |
seed = kwargs.get('seed', None)
|
| 238 |
input_model = kwargs.get('input_model', None)
|
| 239 |
quantizer_version = kwargs.get('quantizer_version', 'v4')
|
|
|
|
| 240 |
|
| 241 |
set_seed(seed)
|
| 242 |
# old_tokenizer = AutoTokenizer.from_pretrained(input_model, trust_remote_code=True)
|
|
|
|
| 288 |
subfolder="checkpoint-26000", # 加载检查点目录
|
| 289 |
config=model_args,
|
| 290 |
cache_dir="/openseg_blob/v-yanbin/GradioDemo/cache_dir",
|
| 291 |
+
).to("cuda")
|
| 292 |
# model = CrelloModel(config=model_args)
|
| 293 |
|
| 294 |
tokenizer.add_special_tokens({"mask_token": "<mask>"})
|
|
|
|
| 326 |
# print('after token embeddings to match the tokenizer', 129423)
|
| 327 |
|
| 328 |
print("before .to(device)")
|
| 329 |
+
model = model.to("cuda")
|
| 330 |
print("after .to(device)")
|
| 331 |
model = model.bfloat16()
|
| 332 |
model.eval()
|
|
|
|
| 338 |
json_example = inputs
|
| 339 |
input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
|
| 340 |
inputs = tokenizer(
|
| 341 |
+
input_intension, return_tensors="pt"
|
| 342 |
+
).to("cuda")
|
| 343 |
|
| 344 |
stopping_criteria = StoppingCriteriaList()
|
| 345 |
stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
|